`_
+
+
+
diff --git a/appWSM/lib/python2.7/site-packages/Flask-0.12.2.dist-info/RECORD b/appWSM/lib/python2.7/site-packages/Flask-0.12.2.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..5b161e0b37758c4b1874fc458fe3310e2f9fab2d
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/Flask-0.12.2.dist-info/RECORD
@@ -0,0 +1,52 @@
+Flask-0.12.2.dist-info/DESCRIPTION.rst,sha256=DmJm8IBlBjl3wkm0Ly23jYvWbvK_mCuE5oUseYCijbI,810
+Flask-0.12.2.dist-info/LICENSE.txt,sha256=hLgKluMRHSnxG-L0EmrqjmKgG5cHlff6pIh3rCNINeI,1582
+Flask-0.12.2.dist-info/METADATA,sha256=OgSkJQ_kmrz4qEkS-OzYtL75uZmXAThymkOcGR4kXRQ,1948
+Flask-0.12.2.dist-info/RECORD,,
+Flask-0.12.2.dist-info/WHEEL,sha256=o2k-Qa-RMNIJmUdIc7KU6VWR_ErNRbWNlxDIpl7lm34,110
+Flask-0.12.2.dist-info/entry_points.txt,sha256=jzk2Wy2h30uEcqqzd4CVnlzsMXB-vaD5GXjuPMXmTmI,60
+Flask-0.12.2.dist-info/metadata.json,sha256=By8kZ1vY9lLEAGnRiWNBhudqKvLPo0HkZVXTYECyPKk,1389
+Flask-0.12.2.dist-info/top_level.txt,sha256=dvi65F6AeGWVU0TBpYiC04yM60-FX1gJFkK31IKQr5c,6
+flask/__init__.py,sha256=sHdK1v6WRbVmCN0fEv990EE7rOT2UlamQkSof2d0Dt0,1673
+flask/__main__.py,sha256=cldbNi5zpjE68XzIWI8uYHNWwBHHVJmwtlXWk6P4CO4,291
+flask/_compat.py,sha256=VlfjUuLjufsTHJIjr_ZsnnOesSbAXIslBBgRe5tfOok,2802
+flask/app.py,sha256=6DPjtb5jUJWgL5fXksG5boA49EB3l-k9pWyftitbNNk,83169
+flask/blueprints.py,sha256=6HVasMcPcaq7tk36kCrgX4bnhTkky4G5WIWCyyJL8HY,16872
+flask/cli.py,sha256=2NXEdCOu5-4ymklxX4Lf6bjb-89I4VHYeP6xScR3i8E,18328
+flask/config.py,sha256=Ym5Jenyu6zAZ1fdVLeKekY9-EsKmq8183qnRgauwCMY,9905
+flask/ctx.py,sha256=UPA0YwoIlHP0txOGanC9lQLSGv6eCqV5Fmw2cVJRmgQ,14739
+flask/debughelpers.py,sha256=z-uQavKIymOZl0WQDLXsnacA00ERIlCx3S3Tnb_OYsE,6024
+flask/exthook.py,sha256=SvXs5jwpcOjogwJ7SNquiWTxowoN1-MHFoqAejWnk2o,5762
+flask/globals.py,sha256=I3m_4RssLhWW1R11zuEI8oFryHUHX3NQwjMkGXOZzg8,1645
+flask/helpers.py,sha256=KrsQ2Yo3lOVHvBTgQCLvpubgmTOpQdTTyiCOOYlwDuQ,38452
+flask/json.py,sha256=1zPM-NPLiWoOfGd0P14FxnEkeKtjtUZxMC9pyYyDBYI,9183
+flask/logging.py,sha256=UG-77jPkRClk9w1B-_ArjjXPuj9AmZz9mG0IRGvptW0,2751
+flask/sessions.py,sha256=QBKXVYKJ-HKbx9m6Yb5yan_EPq84a5yevVLgAzNKFQY,14394
+flask/signals.py,sha256=MfZk5qTRj_R_O3aGYlTEnx2g3SvlZncz8Ii73eKK59g,2209
+flask/templating.py,sha256=u7FbN6j56H_q6CrdJJyJ6gZtqaMa0vh1_GP12gEHRQQ,4912
+flask/testing.py,sha256=II8EO_NjOT1LvL8Hh_SdIFL_BdlwVPcB9yot5pbltxE,5630
+flask/views.py,sha256=6OPv7gwu3h14JhqpeeMRWwrxoGHsUr4_nOGSyTRAxAI,5630
+flask/wrappers.py,sha256=1S_5mmuA1Tlx7D9lXV6xMblrg-PdAauNWahe-henMEE,7612
+flask/ext/__init__.py,sha256=UEezCApsG4ZJWqwUnX9YmWcNN4OVENgph_9L05n0eOM,842
+../../../bin/flask,sha256=igyyGNreXbanNekbFaDgSmwyfzE1HijApcTG8QD9OG0,242
+Flask-0.12.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+flask/config.pyc,,
+flask/testing.pyc,,
+flask/wrappers.pyc,,
+flask/views.pyc,,
+flask/logging.pyc,,
+flask/json.pyc,,
+flask/helpers.pyc,,
+flask/__main__.pyc,,
+flask/blueprints.pyc,,
+flask/signals.pyc,,
+flask/__init__.pyc,,
+flask/exthook.pyc,,
+flask/debughelpers.pyc,,
+flask/templating.pyc,,
+flask/ctx.pyc,,
+flask/cli.pyc,,
+flask/_compat.pyc,,
+flask/app.pyc,,
+flask/globals.pyc,,
+flask/ext/__init__.pyc,,
+flask/sessions.pyc,,
diff --git a/appWSM/lib/python2.7/site-packages/Flask-0.12.2.dist-info/WHEEL b/appWSM/lib/python2.7/site-packages/Flask-0.12.2.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..8b6dd1b5a884bfc07b5f771ab5dc56ed02323531
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/Flask-0.12.2.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.29.0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/appWSM/lib/python2.7/site-packages/Flask-0.12.2.dist-info/entry_points.txt b/appWSM/lib/python2.7/site-packages/Flask-0.12.2.dist-info/entry_points.txt
new file mode 100644
index 0000000000000000000000000000000000000000..14adf18abf5fc0693202a0200e835cef9a828814
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/Flask-0.12.2.dist-info/entry_points.txt
@@ -0,0 +1,4 @@
+
+ [console_scripts]
+ flask=flask.cli:main
+
\ No newline at end of file
diff --git a/appWSM/lib/python2.7/site-packages/Flask-0.12.2.dist-info/metadata.json b/appWSM/lib/python2.7/site-packages/Flask-0.12.2.dist-info/metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..4d814b8b3f472cf79ea323f0fdf0392968ae3ebc
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/Flask-0.12.2.dist-info/metadata.json
@@ -0,0 +1 @@
+{"classifiers": ["Development Status :: 4 - Beta", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Software Development :: Libraries :: Python Modules"], "extensions": {"python.commands": {"wrap_console": {"flask": "flask.cli:main"}}, "python.details": {"contacts": [{"email": "armin.ronacher@active-4.com", "name": "Armin Ronacher", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}, "project_urls": {"Home": "http://github.com/pallets/flask/"}}, "python.exports": {"console_scripts": {"flask": "flask.cli:main"}}}, "extras": [], "generator": "bdist_wheel (0.29.0)", "license": "BSD", "metadata_version": "2.0", "name": "Flask", "platform": "any", "run_requires": [{"requires": ["Jinja2 (>=2.4)", "Werkzeug (>=0.7)", "click (>=2.0)", "itsdangerous (>=0.21)"]}], "summary": "A microframework based on Werkzeug, Jinja2 and good intentions", "version": "0.12.2"}
\ No newline at end of file
diff --git a/appWSM/lib/python2.7/site-packages/Flask-0.12.2.dist-info/top_level.txt b/appWSM/lib/python2.7/site-packages/Flask-0.12.2.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7e1060246fd6746a14204539a72e199a25469a05
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/Flask-0.12.2.dist-info/top_level.txt
@@ -0,0 +1 @@
+flask
diff --git a/appWSM/lib/python2.7/site-packages/Flask_RESTful-0.3.6.dist-info/DESCRIPTION.rst b/appWSM/lib/python2.7/site-packages/Flask_RESTful-0.3.6.dist-info/DESCRIPTION.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e1187231a3553e6c1bec45c6b5a4e4e62b8ef70d
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/Flask_RESTful-0.3.6.dist-info/DESCRIPTION.rst
@@ -0,0 +1,3 @@
+UNKNOWN
+
+
diff --git a/appWSM/lib/python2.7/site-packages/Flask_RESTful-0.3.6.dist-info/INSTALLER b/appWSM/lib/python2.7/site-packages/Flask_RESTful-0.3.6.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/Flask_RESTful-0.3.6.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/appWSM/lib/python2.7/site-packages/Flask_RESTful-0.3.6.dist-info/METADATA b/appWSM/lib/python2.7/site-packages/Flask_RESTful-0.3.6.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..529c46e133fd2f387da3ab92e06a9d20e12da300
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/Flask_RESTful-0.3.6.dist-info/METADATA
@@ -0,0 +1,31 @@
+Metadata-Version: 2.0
+Name: Flask-RESTful
+Version: 0.3.6
+Summary: Simple framework for creating REST APIs
+Home-page: https://www.github.com/flask-restful/flask-restful/
+Author: Twilio API Team
+Author-email: help@twilio.com
+License: BSD
+Platform: any
+Classifier: Framework :: Flask
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: License :: OSI Approved :: BSD License
+Requires-Dist: Flask (>=0.8)
+Requires-Dist: aniso8601 (>=0.82)
+Requires-Dist: pytz
+Requires-Dist: six (>=1.3.0)
+Provides-Extra: docs
+Requires-Dist: sphinx; extra == 'docs'
+Provides-Extra: paging
+Requires-Dist: pycrypto (>=2.6); extra == 'paging'
+
+UNKNOWN
+
+
diff --git a/appWSM/lib/python2.7/site-packages/Flask_RESTful-0.3.6.dist-info/RECORD b/appWSM/lib/python2.7/site-packages/Flask_RESTful-0.3.6.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..1a17400f896a4c2aeada9e00e9009ebd6184ce40
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/Flask_RESTful-0.3.6.dist-info/RECORD
@@ -0,0 +1,29 @@
+Flask_RESTful-0.3.6.dist-info/DESCRIPTION.rst,sha256=OCTuuN6LcWulhHS3d5rfjdsQtW22n7HENFRh6jC6ego,10
+Flask_RESTful-0.3.6.dist-info/METADATA,sha256=THyvrIu4SUuC9fqpfDBP-m6kdZcesFu2-Gm2oxgJnWM,985
+Flask_RESTful-0.3.6.dist-info/RECORD,,
+Flask_RESTful-0.3.6.dist-info/WHEEL,sha256=o2k-Qa-RMNIJmUdIc7KU6VWR_ErNRbWNlxDIpl7lm34,110
+Flask_RESTful-0.3.6.dist-info/metadata.json,sha256=1zKAyvYdpplkpX_NJU8vKCZ9nvy0Y_paxRiMJXCNiXU,1178
+Flask_RESTful-0.3.6.dist-info/top_level.txt,sha256=lNpWPlejgBAtMhCUwz_FTyJH12ul1mBZ-Uv3ZK1HiGg,14
+flask_restful/__init__.py,sha256=dpZuahv5GMo2Rof4OWkGpQvIhnJIeOakL_1EHkkzUA4,28510
+flask_restful/__version__.py,sha256=-RHjzBqfdsudCsVzQ9u-AoX7n-_90gcO1RlvPEVk7d4,45
+flask_restful/fields.py,sha256=9cbc0vXaGzt1Ur8gUf3sHlMjNSy1qOSV_qvi-3YQAL8,13051
+flask_restful/inputs.py,sha256=AOBF_1BpB8snY3XJT_vca_uWYzkCP2nla01lFU7xqLE,9114
+flask_restful/paging.py,sha256=H_nL-UlViLSHJhS7NbHJurHMwzxv7IsaAnuTC4L5Kec,1207
+flask_restful/reqparse.py,sha256=I9qxtSqjVzrDTxQtfSHM4yGx3XsiVoa-wMPlA1mod9s,13475
+flask_restful/representations/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+flask_restful/representations/json.py,sha256=swKwnbt7v2ioHfHkqhqbzIu_yrcP0ComlSl49IGFJOo,873
+flask_restful/utils/__init__.py,sha256=Qh5pyCIT2dfHmrUdS6lsMbBLjZmAhz1fl7vWyJ_n4XQ,719
+flask_restful/utils/cors.py,sha256=cZiqaHhIn0w66spRoSIdC-jIn4X_b6OlVms5eGF4Ess,2084
+flask_restful/utils/crypto.py,sha256=q3PBvAYMJYybbqqQlKNF_Pqeyo9h3x5jFJuVqtEA5bA,996
+Flask_RESTful-0.3.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+flask_restful/utils/crypto.pyc,,
+flask_restful/inputs.pyc,,
+flask_restful/utils/__init__.pyc,,
+flask_restful/utils/cors.pyc,,
+flask_restful/__init__.pyc,,
+flask_restful/paging.pyc,,
+flask_restful/__version__.pyc,,
+flask_restful/representations/json.pyc,,
+flask_restful/reqparse.pyc,,
+flask_restful/representations/__init__.pyc,,
+flask_restful/fields.pyc,,
diff --git a/appWSM/lib/python2.7/site-packages/Flask_RESTful-0.3.6.dist-info/WHEEL b/appWSM/lib/python2.7/site-packages/Flask_RESTful-0.3.6.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..8b6dd1b5a884bfc07b5f771ab5dc56ed02323531
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/Flask_RESTful-0.3.6.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.29.0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/appWSM/lib/python2.7/site-packages/Flask_RESTful-0.3.6.dist-info/metadata.json b/appWSM/lib/python2.7/site-packages/Flask_RESTful-0.3.6.dist-info/metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..fd1720a509662192b16ace5078906c49385d77fc
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/Flask_RESTful-0.3.6.dist-info/metadata.json
@@ -0,0 +1 @@
+{"classifiers": ["Framework :: Flask", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "License :: OSI Approved :: BSD License"], "extensions": {"python.details": {"contacts": [{"email": "help@twilio.com", "name": "Twilio API Team", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://www.github.com/flask-restful/flask-restful/"}}}, "extras": ["docs", "paging"], "generator": "bdist_wheel (0.29.0)", "license": "BSD", "metadata_version": "2.0", "name": "Flask-RESTful", "platform": "any", "run_requires": [{"requires": ["Flask (>=0.8)", "aniso8601 (>=0.82)", "pytz", "six (>=1.3.0)"]}, {"extra": "paging", "requires": ["pycrypto (>=2.6)"]}, {"extra": "docs", "requires": ["sphinx"]}], "summary": "Simple framework for creating REST APIs", "test_requires": [{"requires": ["Flask-RESTful[paging]", "blinker", "mock (>=0.8)"]}], "version": "0.3.6"}
\ No newline at end of file
diff --git a/appWSM/lib/python2.7/site-packages/Flask_RESTful-0.3.6.dist-info/top_level.txt b/appWSM/lib/python2.7/site-packages/Flask_RESTful-0.3.6.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f7b852702bd62e41e8251c3194d8e191569bb625
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/Flask_RESTful-0.3.6.dist-info/top_level.txt
@@ -0,0 +1 @@
+flask_restful
diff --git a/appWSM/lib/python2.7/site-packages/Jinja2-2.10.dist-info/DESCRIPTION.rst b/appWSM/lib/python2.7/site-packages/Jinja2-2.10.dist-info/DESCRIPTION.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1594da5ce5fd98bdb9a05462f6f7252e28c74644
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/Jinja2-2.10.dist-info/DESCRIPTION.rst
@@ -0,0 +1,37 @@
+
+Jinja2
+~~~~~~
+
+Jinja2 is a template engine written in pure Python. It provides a
+`Django`_ inspired non-XML syntax but supports inline expressions and
+an optional `sandboxed`_ environment.
+
+Nutshell
+--------
+
+Here a small example of a Jinja template::
+
+ {% extends 'base.html' %}
+ {% block title %}Memberlist{% endblock %}
+ {% block content %}
+      <ul>
+      {% for user in users %}
+        <li><a href="{{ user.url }}">{{ user.username }}</a></li>
+      {% endfor %}
+      </ul>
+ {% endblock %}
+
+Philosophy
+----------
+
+Application logic is for the controller but don't try to make the life
+for the template designer too hard by giving him too few functionality.
+
+For more informations visit the new `Jinja2 webpage`_ and `documentation`_.
+
+.. _sandboxed: https://en.wikipedia.org/wiki/Sandbox_(computer_security)
+.. _Django: https://www.djangoproject.com/
+.. _Jinja2 webpage: http://jinja.pocoo.org/
+.. _documentation: http://jinja.pocoo.org/2/documentation/
+
+
diff --git a/appWSM/lib/python2.7/site-packages/Jinja2-2.10.dist-info/INSTALLER b/appWSM/lib/python2.7/site-packages/Jinja2-2.10.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/Jinja2-2.10.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/appWSM/lib/python2.7/site-packages/Jinja2-2.10.dist-info/LICENSE.txt b/appWSM/lib/python2.7/site-packages/Jinja2-2.10.dist-info/LICENSE.txt
new file mode 100644
index 0000000000000000000000000000000000000000..10145a264342b7888ec6accfedc4f2808fb67a0e
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/Jinja2-2.10.dist-info/LICENSE.txt
@@ -0,0 +1,31 @@
+Copyright (c) 2009 by the Jinja Team, see AUTHORS for more details.
+
+Some rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+ * The names of the contributors may not be used to endorse or
+ promote products derived from this software without specific
+ prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/appWSM/lib/python2.7/site-packages/Jinja2-2.10.dist-info/METADATA b/appWSM/lib/python2.7/site-packages/Jinja2-2.10.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..40f2b46b40668ac2e7641811be66cd1f7f533939
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/Jinja2-2.10.dist-info/METADATA
@@ -0,0 +1,68 @@
+Metadata-Version: 2.0
+Name: Jinja2
+Version: 2.10
+Summary: A small but fast and easy to use stand-alone template engine written in pure python.
+Home-page: http://jinja.pocoo.org/
+Author: Armin Ronacher
+Author-email: armin.ronacher@active-4.com
+License: BSD
+Description-Content-Type: UNKNOWN
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Text Processing :: Markup :: HTML
+Requires-Dist: MarkupSafe (>=0.23)
+Provides-Extra: i18n
+Requires-Dist: Babel (>=0.8); extra == 'i18n'
+
+
+Jinja2
+~~~~~~
+
+Jinja2 is a template engine written in pure Python. It provides a
+`Django`_ inspired non-XML syntax but supports inline expressions and
+an optional `sandboxed`_ environment.
+
+Nutshell
+--------
+
+Here a small example of a Jinja template::
+
+ {% extends 'base.html' %}
+ {% block title %}Memberlist{% endblock %}
+ {% block content %}
+      <ul>
+      {% for user in users %}
+        <li><a href="{{ user.url }}">{{ user.username }}</a></li>
+      {% endfor %}
+      </ul>
+ {% endblock %}
+
+Philosophy
+----------
+
+Application logic is for the controller but don't try to make the life
+for the template designer too hard by giving him too few functionality.
+
+For more informations visit the new `Jinja2 webpage`_ and `documentation`_.
+
+.. _sandboxed: https://en.wikipedia.org/wiki/Sandbox_(computer_security)
+.. _Django: https://www.djangoproject.com/
+.. _Jinja2 webpage: http://jinja.pocoo.org/
+.. _documentation: http://jinja.pocoo.org/2/documentation/
+
+
diff --git a/appWSM/lib/python2.7/site-packages/Jinja2-2.10.dist-info/RECORD b/appWSM/lib/python2.7/site-packages/Jinja2-2.10.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..5634a3df9c6e8038cca5ae6426fb27954b5038ed
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/Jinja2-2.10.dist-info/RECORD
@@ -0,0 +1,61 @@
+Jinja2-2.10.dist-info/DESCRIPTION.rst,sha256=b5ckFDoM7vVtz_mAsJD4OPteFKCqE7beu353g4COoYI,978
+Jinja2-2.10.dist-info/LICENSE.txt,sha256=JvzUNv3Io51EiWrAPm8d_SXjhJnEjyDYvB3Tvwqqils,1554
+Jinja2-2.10.dist-info/METADATA,sha256=18EgU8zR6-av-0-5y_gXebzK4GnBB_76lALUsl-6QHM,2258
+Jinja2-2.10.dist-info/RECORD,,
+Jinja2-2.10.dist-info/WHEEL,sha256=kdsN-5OJAZIiHN-iO4Rhl82KyS0bDWf4uBwMbkNafr8,110
+Jinja2-2.10.dist-info/entry_points.txt,sha256=NdzVcOrqyNyKDxD09aERj__3bFx2paZhizFDsKmVhiA,72
+Jinja2-2.10.dist-info/metadata.json,sha256=NPUJ9TMBxVQAv_kTJzvU8HwmP-4XZvbK9mz6_4YUVl4,1473
+Jinja2-2.10.dist-info/top_level.txt,sha256=PkeVWtLb3-CqjWi1fO29OCbj55EhX_chhKrCdrVe_zs,7
+jinja2/__init__.py,sha256=xJHjaMoy51_KXn1wf0cysH6tUUifUxZCwSOfcJGEYZw,2614
+jinja2/_compat.py,sha256=xP60CE5Qr8FTYcDE1f54tbZLKGvMwYml4-8T7Q4KG9k,2596
+jinja2/_identifier.py,sha256=W1QBSY-iJsyt6oR_nKSuNNCzV95vLIOYgUNPUI1d5gU,1726
+jinja2/asyncfilters.py,sha256=cTDPvrS8Hp_IkwsZ1m9af_lr5nHysw7uTa5gV0NmZVE,4144
+jinja2/asyncsupport.py,sha256=UErQ3YlTLaSjFb94P4MVn08-aVD9jJxty2JVfMRb-1M,7878
+jinja2/bccache.py,sha256=nQldx0ZRYANMyfvOihRoYFKSlUdd5vJkS7BjxNwlOZM,12794
+jinja2/compiler.py,sha256=BqC5U6JxObSRhblyT_a6Tp5GtEU5z3US1a4jLQaxxgo,65386
+jinja2/constants.py,sha256=uwwV8ZUhHhacAuz5PTwckfsbqBaqM7aKfyJL7kGX5YQ,1626
+jinja2/debug.py,sha256=WTVeUFGUa4v6ReCsYv-iVPa3pkNB75OinJt3PfxNdXs,12045
+jinja2/defaults.py,sha256=Em-95hmsJxIenDCZFB1YSvf9CNhe9rBmytN3yUrBcWA,1400
+jinja2/environment.py,sha256=VnkAkqw8JbjZct4tAyHlpBrka2vqB-Z58RAP-32P1ZY,50849
+jinja2/exceptions.py,sha256=_Rj-NVi98Q6AiEjYQOsP8dEIdu5AlmRHzcSNOPdWix4,4428
+jinja2/ext.py,sha256=atMQydEC86tN1zUsdQiHw5L5cF62nDbqGue25Yiu3N4,24500
+jinja2/filters.py,sha256=yOAJk0MsH-_gEC0i0U6NweVQhbtYaC-uE8xswHFLF4w,36528
+jinja2/idtracking.py,sha256=2GbDSzIvGArEBGLkovLkqEfmYxmWsEf8c3QZwM4uNsw,9197
+jinja2/lexer.py,sha256=ySEPoXd1g7wRjsuw23uimS6nkGN5aqrYwcOKxCaVMBQ,28559
+jinja2/loaders.py,sha256=xiTuURKAEObyym0nU8PCIXu_Qp8fn0AJ5oIADUUm-5Q,17382
+jinja2/meta.py,sha256=fmKHxkmZYAOm9QyWWy8EMd6eefAIh234rkBMW2X4ZR8,4340
+jinja2/nativetypes.py,sha256=_sJhS8f-8Q0QMIC0dm1YEdLyxEyoO-kch8qOL5xUDfE,7308
+jinja2/nodes.py,sha256=L10L_nQDfubLhO3XjpF9qz46FSh2clL-3e49ogVlMmA,30853
+jinja2/optimizer.py,sha256=MsdlFACJ0FRdPtjmCAdt7JQ9SGrXFaDNUaslsWQaG3M,1722
+jinja2/parser.py,sha256=lPzTEbcpTRBLw8ii6OYyExHeAhaZLMA05Hpv4ll3ULk,35875
+jinja2/runtime.py,sha256=DHdD38Pq8gj7uWQC5usJyWFoNWL317A9AvXOW_CLB34,27755
+jinja2/sandbox.py,sha256=TVyZHlNqqTzsv9fv2NvJNmSdWRHTguhyMHdxjWms32U,16708
+jinja2/tests.py,sha256=iJQLwbapZr-EKquTG_fVOVdwHUUKf3SX9eNkjQDF8oU,4237
+jinja2/utils.py,sha256=q24VupGZotQ-uOyrJxCaXtDWhZC1RgsQG7kcdmjck2Q,20629
+jinja2/visitor.py,sha256=JD1H1cANA29JcntFfN5fPyqQxB4bI4wC00BzZa-XHks,3316
+Jinja2-2.10.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+jinja2/_compat.pyc,,
+jinja2/sandbox.pyc,,
+jinja2/environment.pyc,,
+jinja2/ext.pyc,,
+jinja2/runtime.pyc,,
+jinja2/utils.pyc,,
+jinja2/parser.pyc,,
+jinja2/debug.pyc,,
+jinja2/lexer.pyc,,
+jinja2/defaults.pyc,,
+jinja2/visitor.pyc,,
+jinja2/bccache.pyc,,
+jinja2/_identifier.pyc,,
+jinja2/nodes.pyc,,
+jinja2/compiler.pyc,,
+jinja2/nativetypes.pyc,,
+jinja2/exceptions.pyc,,
+jinja2/__init__.pyc,,
+jinja2/loaders.pyc,,
+jinja2/optimizer.pyc,,
+jinja2/filters.pyc,,
+jinja2/tests.pyc,,
+jinja2/meta.pyc,,
+jinja2/idtracking.pyc,,
+jinja2/constants.pyc,,
diff --git a/appWSM/lib/python2.7/site-packages/Jinja2-2.10.dist-info/WHEEL b/appWSM/lib/python2.7/site-packages/Jinja2-2.10.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..7332a419cda6903b61439f3bac93492b0747e6e7
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/Jinja2-2.10.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.30.0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/appWSM/lib/python2.7/site-packages/Jinja2-2.10.dist-info/entry_points.txt b/appWSM/lib/python2.7/site-packages/Jinja2-2.10.dist-info/entry_points.txt
new file mode 100644
index 0000000000000000000000000000000000000000..32e6b7530284307358fa869cf232318221f791fd
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/Jinja2-2.10.dist-info/entry_points.txt
@@ -0,0 +1,4 @@
+
+ [babel.extractors]
+ jinja2 = jinja2.ext:babel_extract[i18n]
+
\ No newline at end of file
diff --git a/appWSM/lib/python2.7/site-packages/Jinja2-2.10.dist-info/metadata.json b/appWSM/lib/python2.7/site-packages/Jinja2-2.10.dist-info/metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..7f5dc3879d9320ee95e12f257f83222dd331a304
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/Jinja2-2.10.dist-info/metadata.json
@@ -0,0 +1 @@
+{"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Text Processing :: Markup :: HTML"], "description_content_type": "UNKNOWN", "extensions": {"python.details": {"contacts": [{"email": "armin.ronacher@active-4.com", "name": "Armin Ronacher", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}, "project_urls": {"Home": "http://jinja.pocoo.org/"}}, "python.exports": {"babel.extractors": {"jinja2": "jinja2.ext:babel_extract [i18n]"}}}, "extras": ["i18n"], "generator": "bdist_wheel (0.30.0)", "license": "BSD", "metadata_version": "2.0", "name": "Jinja2", "run_requires": [{"extra": "i18n", "requires": ["Babel (>=0.8)"]}, {"requires": ["MarkupSafe (>=0.23)"]}], "summary": "A small but fast and easy to use stand-alone template engine written in pure python.", "version": "2.10"}
\ No newline at end of file
diff --git a/appWSM/lib/python2.7/site-packages/Jinja2-2.10.dist-info/top_level.txt b/appWSM/lib/python2.7/site-packages/Jinja2-2.10.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7f7afbf3bf54b346092be6a72070fcbd305ead1e
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/Jinja2-2.10.dist-info/top_level.txt
@@ -0,0 +1 @@
+jinja2
diff --git a/appWSM/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/DESCRIPTION.rst b/appWSM/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/DESCRIPTION.rst
new file mode 100644
index 0000000000000000000000000000000000000000..495211bd12dec44dde597e310bf546c6cc26595f
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/DESCRIPTION.rst
@@ -0,0 +1,115 @@
+MarkupSafe
+==========
+
+Implements a unicode subclass that supports HTML strings:
+
+.. code-block:: python
+
+ >>> from markupsafe import Markup, escape
+    >>> escape("<script>alert(document.cookie);</script>")
+    Markup(u'&lt;script&gt;alert(document.cookie);&lt;/script&gt;')
+    >>> tmpl = Markup("<em>%s</em>")
+    >>> tmpl % "Peter > Lustig"
+    Markup(u'<em>Peter &gt; Lustig</em>')
+
+If you want to make an object unicode that is not yet unicode
+but don't want to lose the taint information, you can use the
+``soft_unicode`` function. (On Python 3 you can also use ``soft_str`` which
+is a different name for the same function).
+
+.. code-block:: python
+
+ >>> from markupsafe import soft_unicode
+ >>> soft_unicode(42)
+ u'42'
+ >>> soft_unicode(Markup('foo'))
+ Markup(u'foo')
+
+HTML Representations
+--------------------
+
+Objects can customize their HTML markup equivalent by overriding
+the ``__html__`` function:
+
+.. code-block:: python
+
+ >>> class Foo(object):
+ ... def __html__(self):
+    ...   return '<strong>Nice</strong>'
+ ...
+ >>> escape(Foo())
+    Markup(u'<strong>Nice</strong>')
+    >>> Markup(Foo())
+    Markup(u'<strong>Nice</strong>')
+
+Silent Escapes
+--------------
+
+Since MarkupSafe 0.10 there is now also a separate escape function
+called ``escape_silent`` that returns an empty string for ``None`` for
+consistency with other systems that return empty strings for ``None``
+when escaping (for instance Pylons' webhelpers).
+
+If you also want to use this for the escape method of the Markup
+object, you can create your own subclass that does that:
+
+.. code-block:: python
+
+ from markupsafe import Markup, escape_silent as escape
+
+ class SilentMarkup(Markup):
+ __slots__ = ()
+
+ @classmethod
+ def escape(cls, s):
+ return cls(escape(s))
+
+New-Style String Formatting
+---------------------------
+
+Starting with MarkupSafe 0.21 new style string formats from Python 2.6 and
+3.x are now fully supported. Previously the escape behavior of those
+functions was spotty at best. The new implementations operates under the
+following algorithm:
+
+1. if an object has an ``__html_format__`` method it is called as
+ replacement for ``__format__`` with the format specifier. It either
+ has to return a string or markup object.
+2. if an object has an ``__html__`` method it is called.
+3. otherwise the default format system of Python kicks in and the result
+ is HTML escaped.
+
+Here is how you can implement your own formatting:
+
+.. code-block:: python
+
+ class User(object):
+
+ def __init__(self, id, username):
+ self.id = id
+ self.username = username
+
+ def __html_format__(self, format_spec):
+ if format_spec == 'link':
+                return Markup('<a href="/user/{0}">{1}</a>').format(
+ self.id,
+ self.__html__(),
+ )
+ elif format_spec:
+ raise ValueError('Invalid format spec')
+ return self.__html__()
+
+ def __html__(self):
+            return Markup('<em>{0}</em>').format(self.username)
+
+And to format that user:
+
+.. code-block:: python
+
+ >>> user = User(1, 'foo')
+    >>> Markup('<p>User: {0:link}').format(user)
+    Markup(u'<p>User: <a href="/user/1">foo</a>')
+
+Markupsafe supports Python 2.6, 2.7 and Python 3.3 and higher.
+
+
diff --git a/appWSM/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/INSTALLER b/appWSM/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/appWSM/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/LICENSE.txt b/appWSM/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/LICENSE.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5d2693890dddc34129973f5613afd88767213b24
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/LICENSE.txt
@@ -0,0 +1,33 @@
+Copyright (c) 2010 by Armin Ronacher and contributors. See AUTHORS
+for more details.
+
+Some rights reserved.
+
+Redistribution and use in source and binary forms of the software as well
+as documentation, with or without modification, are permitted provided
+that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+* The names of the contributors may not be used to endorse or
+ promote products derived from this software without specific
+ prior written permission.
+
+THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
+NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
+OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGE.
diff --git a/appWSM/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/METADATA b/appWSM/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..52e50fd66e6f0b59a9ea289b72af0861df9a55dd
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/METADATA
@@ -0,0 +1,136 @@
+Metadata-Version: 2.0
+Name: MarkupSafe
+Version: 1.0
+Summary: Implements a XML/HTML/XHTML Markup safe string for Python
+Home-page: http://github.com/pallets/markupsafe
+Author: Armin Ronacher
+Author-email: armin.ronacher@active-4.com
+License: BSD
+Description-Content-Type: UNKNOWN
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Text Processing :: Markup :: HTML
+
+MarkupSafe
+==========
+
+Implements a unicode subclass that supports HTML strings:
+
+.. code-block:: python
+
+ >>> from markupsafe import Markup, escape
+    >>> escape("<script>alert(document.cookie);</script>")
+    Markup(u'&lt;script&gt;alert(document.cookie);&lt;/script&gt;')
+    >>> tmpl = Markup("<em>%s</em>")
+    >>> tmpl % "Peter > Lustig"
+    Markup(u'<em>Peter &gt; Lustig</em>')
+
+If you want to make an object unicode that is not yet unicode
+but don't want to lose the taint information, you can use the
+``soft_unicode`` function. (On Python 3 you can also use ``soft_str`` which
+is a different name for the same function).
+
+.. code-block:: python
+
+ >>> from markupsafe import soft_unicode
+ >>> soft_unicode(42)
+ u'42'
+ >>> soft_unicode(Markup('foo'))
+ Markup(u'foo')
+
+HTML Representations
+--------------------
+
+Objects can customize their HTML markup equivalent by overriding
+the ``__html__`` function:
+
+.. code-block:: python
+
+ >>> class Foo(object):
+ ... def __html__(self):
+    ...   return '<strong>Nice</strong>'
+ ...
+ >>> escape(Foo())
+    Markup(u'<strong>Nice</strong>')
+    >>> Markup(Foo())
+    Markup(u'<strong>Nice</strong>')
+
+Silent Escapes
+--------------
+
+Since MarkupSafe 0.10 there is now also a separate escape function
+called ``escape_silent`` that returns an empty string for ``None`` for
+consistency with other systems that return empty strings for ``None``
+when escaping (for instance Pylons' webhelpers).
+
+If you also want to use this for the escape method of the Markup
+object, you can create your own subclass that does that:
+
+.. code-block:: python
+
+ from markupsafe import Markup, escape_silent as escape
+
+ class SilentMarkup(Markup):
+ __slots__ = ()
+
+ @classmethod
+ def escape(cls, s):
+ return cls(escape(s))
+
+New-Style String Formatting
+---------------------------
+
+Starting with MarkupSafe 0.21 new style string formats from Python 2.6 and
+3.x are now fully supported. Previously the escape behavior of those
+functions was spotty at best. The new implementations operates under the
+following algorithm:
+
+1. if an object has an ``__html_format__`` method it is called as
+ replacement for ``__format__`` with the format specifier. It either
+ has to return a string or markup object.
+2. if an object has an ``__html__`` method it is called.
+3. otherwise the default format system of Python kicks in and the result
+ is HTML escaped.
+
+Here is how you can implement your own formatting:
+
+.. code-block:: python
+
+ class User(object):
+
+ def __init__(self, id, username):
+ self.id = id
+ self.username = username
+
+ def __html_format__(self, format_spec):
+ if format_spec == 'link':
+                return Markup('<a href="/user/{0}">{1}</a>').format(
+ self.id,
+ self.__html__(),
+ )
+ elif format_spec:
+ raise ValueError('Invalid format spec')
+ return self.__html__()
+
+ def __html__(self):
+            return Markup('<em>{0}</em>').format(self.username)
+
+And to format that user:
+
+.. code-block:: python
+
+ >>> user = User(1, 'foo')
+    >>> Markup('<p>User: {0:link}').format(user)
+    Markup(u'<p>User: <a href="/user/1">foo</a>')
+
+Markupsafe supports Python 2.6, 2.7 and Python 3.3 and higher.
+
+
diff --git a/appWSM/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/RECORD b/appWSM/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..73cfb5382beeac4ad444934cffa23d7fdfbe99e9
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/RECORD
@@ -0,0 +1,18 @@
+MarkupSafe-1.0.dist-info/DESCRIPTION.rst,sha256=3B3J0YLzzmJQVaWQ_XlVMhGeHA_DvBqysABvul_5fko,3397
+MarkupSafe-1.0.dist-info/LICENSE.txt,sha256=C76IIo_WPSDsCX9k5Y1aCkZRI64TkUChjUBsYLSIJLU,1582
+MarkupSafe-1.0.dist-info/METADATA,sha256=OaqNJuUium8313pk7v9-1zJSn_q881dXm0NbnrwibIs,4216
+MarkupSafe-1.0.dist-info/RECORD,,
+MarkupSafe-1.0.dist-info/WHEEL,sha256=SbDL06p73vwui0yN_WleK4ikNZmZXkWtuZM0WWDCUhU,105
+MarkupSafe-1.0.dist-info/metadata.json,sha256=G6pFEVAWSjETqEGYgPlkcKKk6ZSpi6MGtPhoZccUZFw,963
+MarkupSafe-1.0.dist-info/top_level.txt,sha256=qy0Plje5IJuvsCBjejJyhDCjEAdcDLK_2agVcex8Z6U,11
+markupsafe/__init__.py,sha256=xtkRdxhzJzgp65wUo1D4DjnazxHU88pPldaAuDekBeY,10697
+markupsafe/_compat.py,sha256=r1HE0CpcAZeb-AiTV9wITR91PeLHn0CzZ_XHkYoozpI,565
+markupsafe/_constants.py,sha256=U_xybFQsyXKCgHSfranJnFzo-z9nn9fuBeSk243sE5Q,4795
+markupsafe/_native.py,sha256=E2Un1ysOf-w45d18YCj8UelT5UP7Vt__IuFPYJ7YRIs,1187
+markupsafe/_speedups.c,sha256=B6Mf6Fn33WqkagfwY7q5ZBSm_vJoHDYxDB0Jp_DP7Jw,5936
+markupsafe/_speedups.so,sha256=tOKAsdZDAlIzVa5Vj3BoOLuE7r6oAcJ1EY1_TOjFeFw,28304
+MarkupSafe-1.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+markupsafe/_native.pyc,,
+markupsafe/_constants.pyc,,
+markupsafe/_compat.pyc,,
+markupsafe/__init__.pyc,,
diff --git a/appWSM/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/WHEEL b/appWSM/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..93970e3ce12f71d5a36139bd22f78bc08c92d192
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.30.0)
+Root-Is-Purelib: false
+Tag: cp27-cp27mu-linux_x86_64
+
diff --git a/appWSM/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/metadata.json b/appWSM/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..cbd7a5078ba7963e2bbd75d13c5cbd3f2ecd7d80
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/metadata.json
@@ -0,0 +1 @@
+{"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Text Processing :: Markup :: HTML"], "description_content_type": "UNKNOWN", "extensions": {"python.details": {"contacts": [{"email": "armin.ronacher@active-4.com", "name": "Armin Ronacher", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}, "project_urls": {"Home": "http://github.com/pallets/markupsafe"}}}, "generator": "bdist_wheel (0.30.0)", "license": "BSD", "metadata_version": "2.0", "name": "MarkupSafe", "summary": "Implements a XML/HTML/XHTML Markup safe string for Python", "version": "1.0"}
\ No newline at end of file
diff --git a/appWSM/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/top_level.txt b/appWSM/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..75bf729258f9daef77370b6df1a57940f90fc23f
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+markupsafe
diff --git a/appWSM/lib/python2.7/site-packages/Werkzeug-0.12.2.dist-info/DESCRIPTION.rst b/appWSM/lib/python2.7/site-packages/Werkzeug-0.12.2.dist-info/DESCRIPTION.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3e3abb7f29879ba545af836883f5aa8a4745c7c2
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/Werkzeug-0.12.2.dist-info/DESCRIPTION.rst
@@ -0,0 +1,54 @@
+Werkzeug
+========
+
+Werkzeug started as simple collection of various utilities for WSGI
+applications and has become one of the most advanced WSGI utility
+modules. It includes a powerful debugger, full featured request and
+response objects, HTTP utilities to handle entity tags, cache control
+headers, HTTP dates, cookie handling, file uploads, a powerful URL
+routing system and a bunch of community contributed addon modules.
+
+Werkzeug is unicode aware and doesn't enforce a specific template
+engine, database adapter or anything else. It doesn't even enforce
+a specific way of handling requests and leaves all that up to the
+developer. It's most useful for end user applications which should work
+on as many server environments as possible (such as blogs, wikis,
+bulletin boards, etc.).
+
+Details and example applications are available on the
+`Werkzeug website <http://werkzeug.pocoo.org/>`_.
+
+
+Features
+--------
+
+- unicode awareness
+
+- request and response objects
+
+- various utility functions for dealing with HTTP headers such as
+ `Accept` and `Cache-Control` headers.
+
+- thread local objects with proper cleanup at request end
+
+- an interactive debugger
+
+- A simple WSGI server with support for threading and forking
+ with an automatic reloader.
+
+- a flexible URL routing system with REST support.
+
+- fully WSGI compatible
+
+
+Development Version
+-------------------
+
+The Werkzeug development version can be installed by cloning the git
+repository from `github`_::
+
+ git clone git@github.com:pallets/werkzeug.git
+
+.. _github: http://github.com/pallets/werkzeug
+
+
diff --git a/appWSM/lib/python2.7/site-packages/Werkzeug-0.12.2.dist-info/INSTALLER b/appWSM/lib/python2.7/site-packages/Werkzeug-0.12.2.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/Werkzeug-0.12.2.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/appWSM/lib/python2.7/site-packages/Werkzeug-0.12.2.dist-info/LICENSE.txt b/appWSM/lib/python2.7/site-packages/Werkzeug-0.12.2.dist-info/LICENSE.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1c2e0b7dc6db835268a10f6e7e34733786b6e986
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/Werkzeug-0.12.2.dist-info/LICENSE.txt
@@ -0,0 +1,29 @@
+Copyright (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+ * The names of the contributors may not be used to endorse or
+ promote products derived from this software without specific
+ prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/appWSM/lib/python2.7/site-packages/Werkzeug-0.12.2.dist-info/METADATA b/appWSM/lib/python2.7/site-packages/Werkzeug-0.12.2.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..52e12a4f0db2cb2a2494206f3244e84329614281
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/Werkzeug-0.12.2.dist-info/METADATA
@@ -0,0 +1,83 @@
+Metadata-Version: 2.0
+Name: Werkzeug
+Version: 0.12.2
+Summary: The Swiss Army knife of Python web development
+Home-page: http://werkzeug.pocoo.org/
+Author: Armin Ronacher
+Author-email: armin.ronacher@active-4.com
+License: BSD
+Platform: any
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Provides-Extra: termcolor
+Requires-Dist: termcolor; extra == 'termcolor'
+Provides-Extra: watchdog
+Requires-Dist: watchdog; extra == 'watchdog'
+
+Werkzeug
+========
+
+Werkzeug started as simple collection of various utilities for WSGI
+applications and has become one of the most advanced WSGI utility
+modules. It includes a powerful debugger, full featured request and
+response objects, HTTP utilities to handle entity tags, cache control
+headers, HTTP dates, cookie handling, file uploads, a powerful URL
+routing system and a bunch of community contributed addon modules.
+
+Werkzeug is unicode aware and doesn't enforce a specific template
+engine, database adapter or anything else. It doesn't even enforce
+a specific way of handling requests and leaves all that up to the
+developer. It's most useful for end user applications which should work
+on as many server environments as possible (such as blogs, wikis,
+bulletin boards, etc.).
+
+Details and example applications are available on the
+`Werkzeug website <http://werkzeug.pocoo.org/>`_.
+
+
+Features
+--------
+
+- unicode awareness
+
+- request and response objects
+
+- various utility functions for dealing with HTTP headers such as
+ `Accept` and `Cache-Control` headers.
+
+- thread local objects with proper cleanup at request end
+
+- an interactive debugger
+
+- A simple WSGI server with support for threading and forking
+ with an automatic reloader.
+
+- a flexible URL routing system with REST support.
+
+- fully WSGI compatible
+
+
+Development Version
+-------------------
+
+The Werkzeug development version can be installed by cloning the git
+repository from `github`_::
+
+ git clone git@github.com:pallets/werkzeug.git
+
+.. _github: http://github.com/pallets/werkzeug
+
+
diff --git a/appWSM/lib/python2.7/site-packages/Werkzeug-0.12.2.dist-info/RECORD b/appWSM/lib/python2.7/site-packages/Werkzeug-0.12.2.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..12866689901f7944b7e73877855fbbfbdfb073d8
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/Werkzeug-0.12.2.dist-info/RECORD
@@ -0,0 +1,95 @@
+werkzeug/posixemulation.py,sha256=xEF2Bxc-vUCPkiu4IbfWVd3LW7DROYAT-ExW6THqyzw,3519
+werkzeug/security.py,sha256=Z0v0ojdo7T4FNyfIjx86BFQKwasy3ZR9euikIJDQYP8,9191
+werkzeug/__init__.py,sha256=NDY8HsYsT3dguTLu4MhuH-GpQE5XS9aKhrdfwHnzOEk,6864
+werkzeug/testapp.py,sha256=3HQRW1sHZKXuAjCvFMet4KXtQG3loYTFnvn6LWt-4zI,9396
+werkzeug/http.py,sha256=nrk-ASJzcKOuoBEz274TWA8jKt0CQSOBZuP_A0UASTA,36658
+werkzeug/routing.py,sha256=g25wg0GNfff8WcfRlc1ZxTGvz1KbVj09w2S7wxopseQ,66746
+werkzeug/utils.py,sha256=lkybtv_mq35zV1qhelvEcILTzrMUwZ9yon6E8XwapJE,22972
+werkzeug/exceptions.py,sha256=3wp95Hqj9FqV8MdikV99JRcHse_fSMn27V8tgP5Hw2c,20505
+werkzeug/_reloader.py,sha256=NkIXQCTa6b22wWLpXob_jIVUxux8LtAsfWehLkKt0iM,8816
+werkzeug/formparser.py,sha256=DxN53eOCb6i7PxqtldrF2Kv9Mx00BqW297N4t-RxkWE,21241
+werkzeug/_compat.py,sha256=8c4U9o6A_TR9nKCcTbpZNxpqCXcXDVIbFawwKM2s92c,6311
+werkzeug/datastructures.py,sha256=rq0zICISMUetS3xvUVvrhIvyue9oUzrs_NU3b83zwuQ,89066
+werkzeug/wrappers.py,sha256=wceh1RhvhIZVzKuok3XMQ5jqjYYCEYv5JqKY3Nc_oRY,82986
+werkzeug/test.py,sha256=xnabNSpty66ftZiXHcoZaYFP1E4WUNxydw5Oe8Mjhoo,34795
+werkzeug/urls.py,sha256=fSbI4Gb29_p02Zk21VAZQRN1QdOVY9CNTgpb2rbajNQ,36710
+werkzeug/script.py,sha256=Jh9OAktqjLNc_IBBUatVM7uP5LDcbxaYA8n2ObnS4bo,11666
+werkzeug/useragents.py,sha256=Ck3G977Y0Rzdk9wFcLpL0PyOrONtdK1_d2Zexb78cX4,5640
+werkzeug/local.py,sha256=QdQhWV5L8p1Y1CJ1CDStwxaUs24SuN5aebHwjVD08C8,14553
+werkzeug/serving.py,sha256=aAS3EgiD-VjemsYfSf1yqdjaGEfpB4I3M4PKlLotJLo,29069
+werkzeug/filesystem.py,sha256=hHWeWo_gqLMzTRfYt8-7n2wWcWUNTnDyudQDLOBEICE,2175
+werkzeug/wsgi.py,sha256=TjPo5ups3NI1RVVGdMvd3XaceqFtqlMX5X169gWWFrQ,42838
+werkzeug/_internal.py,sha256=sE2JbLnMzN9mRI1iipTYWrFAGEWaZVECqtHAiNEhqUE,13841
+werkzeug/contrib/fixers.py,sha256=gR06T-w71ur-tHQ_31kP_4jpOncPJ4Wc1dOqTvYusr8,10179
+werkzeug/contrib/limiter.py,sha256=iS8-ahPZ-JLRnmfIBzxpm7O_s3lPsiDMVWv7llAIDCI,1334
+werkzeug/contrib/__init__.py,sha256=f7PfttZhbrImqpr5Ezre8CXgwvcGUJK7zWNpO34WWrw,623
+werkzeug/contrib/testtools.py,sha256=G9xN-qeihJlhExrIZMCahvQOIDxdL9NiX874jiiHFMs,2453
+werkzeug/contrib/iterio.py,sha256=RlqDvGhz0RneTpzE8dVc-yWCUv4nkPl1jEc_EDp2fH0,10814
+werkzeug/contrib/cache.py,sha256=nyUUxsS0MTHiFmu-481y9PHd8NvWH5pzCoEX1yA0mHY,30341
+werkzeug/contrib/securecookie.py,sha256=bDsAJmslkwmXjycnPjEjWtfLBvhz0ud4z3k7tdezUVs,12174
+werkzeug/contrib/lint.py,sha256=qZlmqiWJ5tQJOEzLnPmHWA8eUEpcBIWkAb_V2RKJg4o,12558
+werkzeug/contrib/profiler.py,sha256=ISwCWvwVyGpDLRBRpLjo_qUWma6GXYBrTAco4PEQSHY,5151
+werkzeug/contrib/wrappers.py,sha256=v7OYlz7wQtDlS9fey75UiRZ1IkUWqCpzbhsLy4k14Hw,10398
+werkzeug/contrib/atom.py,sha256=qqfJcfIn2RYY-3hO3Oz0aLq9YuNubcPQ_KZcNsDwVJo,15575
+werkzeug/contrib/jsrouting.py,sha256=QTmgeDoKXvNK02KzXgx9lr3cAH6fAzpwF5bBdPNvJPs,8564
+werkzeug/contrib/sessions.py,sha256=39LVNvLbm5JWpbxM79WC2l87MJFbqeISARjwYbkJatw,12577
+werkzeug/debug/console.py,sha256=n3-dsKk1TsjnN-u4ZgmuWCU_HO0qw5IA7ttjhyyMM6I,5607
+werkzeug/debug/repr.py,sha256=bKqstDYGfECpeLerd48s_hxuqK4b6UWnjMu3d_DHO8I,9340
+werkzeug/debug/__init__.py,sha256=GTsOsjE3PqUAlsUVm2Mgc_KWA2kjjSsUz0JsM7Qu41w,17266
+werkzeug/debug/tbtools.py,sha256=rBudXCmkVdAKIcdhxANxgf09g6kQjJWW9_5bjSpr4OY,18451
+werkzeug/debug/shared/less.png,sha256=-4-kNRaXJSONVLahrQKUxMwXGm9R4OnZ9SxDGpHlIR4,191
+werkzeug/debug/shared/source.png,sha256=RoGcBTE4CyCB85GBuDGTFlAnUqxwTBiIfDqW15EpnUQ,818
+werkzeug/debug/shared/debugger.js,sha256=PKPVYuyO4SX1hkqLOwCLvmIEO5154WatFYaXE-zIfKI,6264
+werkzeug/debug/shared/style.css,sha256=IEO0PC2pWmh2aEyGCaN--txuWsRCliuhlbEhPDFwh0A,6270
+werkzeug/debug/shared/console.png,sha256=bxax6RXXlvOij_KeqvSNX0ojJf83YbnZ7my-3Gx9w2A,507
+werkzeug/debug/shared/FONT_LICENSE,sha256=LwAVEI1oYnvXiNMT9SnCH_TaLCxCpeHziDrMg0gPkAI,4673
+werkzeug/debug/shared/jquery.js,sha256=7LkWEzqTdpEfELxcZZlS6wAx5Ff13zZ83lYO2_ujj7g,95957
+werkzeug/debug/shared/ubuntu.ttf,sha256=1eaHFyepmy4FyDvjLVzpITrGEBu_CZYY94jE0nED1c0,70220
+werkzeug/debug/shared/more.png,sha256=GngN7CioHQoV58rH6ojnkYi8c_qED2Aka5FO5UXrReY,200
+Werkzeug-0.12.2.dist-info/DESCRIPTION.rst,sha256=z9r9xqJ0fYSAn1Tz7KRBdFGDerL2y4pHWSW_72pUgTc,1591
+Werkzeug-0.12.2.dist-info/metadata.json,sha256=6taKobd3cQ5zOY5MVKlvuCJGaX7VPLaHYuRwzwwkORI,1276
+Werkzeug-0.12.2.dist-info/RECORD,,
+Werkzeug-0.12.2.dist-info/top_level.txt,sha256=QRyj2VjwJoQkrwjwFIOlB8Xg3r9un0NtqVHQF-15xaw,9
+Werkzeug-0.12.2.dist-info/WHEEL,sha256=AvR0WeTpDaxT645bl5FQxUK6NPsTls2ttpcGJg3j1Xg,110
+Werkzeug-0.12.2.dist-info/LICENSE.txt,sha256=F84h8-PZAuC-Hq-_252D3yhH6mqIc-WUbXUPbfOtjXM,1532
+Werkzeug-0.12.2.dist-info/METADATA,sha256=SphYykCCskmOJK7mV1-M2T1PTOrx5K3DJ8n3E5jA298,2738
+Werkzeug-0.12.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+werkzeug/_reloader.pyc,,
+werkzeug/filesystem.pyc,,
+werkzeug/contrib/testtools.pyc,,
+werkzeug/formparser.pyc,,
+werkzeug/_compat.pyc,,
+werkzeug/posixemulation.pyc,,
+werkzeug/wsgi.pyc,,
+werkzeug/serving.pyc,,
+werkzeug/contrib/__init__.pyc,,
+werkzeug/contrib/iterio.pyc,,
+werkzeug/test.pyc,,
+werkzeug/__init__.pyc,,
+werkzeug/contrib/limiter.pyc,,
+werkzeug/debug/tbtools.pyc,,
+werkzeug/contrib/sessions.pyc,,
+werkzeug/local.pyc,,
+werkzeug/utils.pyc,,
+werkzeug/_internal.pyc,,
+werkzeug/security.pyc,,
+werkzeug/contrib/cache.pyc,,
+werkzeug/contrib/securecookie.pyc,,
+werkzeug/script.pyc,,
+werkzeug/routing.pyc,,
+werkzeug/wrappers.pyc,,
+werkzeug/contrib/jsrouting.pyc,,
+werkzeug/contrib/fixers.pyc,,
+werkzeug/contrib/profiler.pyc,,
+werkzeug/debug/console.pyc,,
+werkzeug/debug/__init__.pyc,,
+werkzeug/datastructures.pyc,,
+werkzeug/http.pyc,,
+werkzeug/urls.pyc,,
+werkzeug/contrib/lint.pyc,,
+werkzeug/contrib/wrappers.pyc,,
+werkzeug/exceptions.pyc,,
+werkzeug/contrib/atom.pyc,,
+werkzeug/testapp.pyc,,
+werkzeug/useragents.pyc,,
+werkzeug/debug/repr.pyc,,
diff --git a/appWSM/lib/python2.7/site-packages/Werkzeug-0.12.2.dist-info/WHEEL b/appWSM/lib/python2.7/site-packages/Werkzeug-0.12.2.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..9dff69d86102d36169ccf42a9e71885d3dc17155
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/Werkzeug-0.12.2.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.24.0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/appWSM/lib/python2.7/site-packages/Werkzeug-0.12.2.dist-info/metadata.json b/appWSM/lib/python2.7/site-packages/Werkzeug-0.12.2.dist-info/metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..d77fbb8dd486083b6e234075d8fa53c434327142
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/Werkzeug-0.12.2.dist-info/metadata.json
@@ -0,0 +1 @@
+{"license": "BSD", "name": "Werkzeug", "metadata_version": "2.0", "generator": "bdist_wheel (0.24.0)", "summary": "The Swiss Army knife of Python web development", "platform": "any", "run_requires": [{"requires": ["watchdog"], "extra": "watchdog"}, {"requires": ["termcolor"], "extra": "termcolor"}], "version": "0.12.2", "extensions": {"python.details": {"project_urls": {"Home": "http://werkzeug.pocoo.org/"}, "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}, "contacts": [{"role": "author", "email": "armin.ronacher@active-4.com", "name": "Armin Ronacher"}]}}, "classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Software Development :: Libraries :: Python Modules"], "extras": ["termcolor", "watchdog"]}
\ No newline at end of file
diff --git a/appWSM/lib/python2.7/site-packages/Werkzeug-0.12.2.dist-info/top_level.txt b/appWSM/lib/python2.7/site-packages/Werkzeug-0.12.2.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6fe8da8499399d7c0484847967ad49e4b165589b
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/Werkzeug-0.12.2.dist-info/top_level.txt
@@ -0,0 +1 @@
+werkzeug
diff --git a/appWSM/lib/python2.7/site-packages/aniso8601-1.3.0.dist-info/DESCRIPTION.rst b/appWSM/lib/python2.7/site-packages/aniso8601-1.3.0.dist-info/DESCRIPTION.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c3b6813247ff1d99ac1eba14efe21d0ab3330a1a
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/aniso8601-1.3.0.dist-info/DESCRIPTION.rst
@@ -0,0 +1,331 @@
+===========
+ aniso8601
+===========
+
+----------------------------------
+Another ISO 8601 parser for Python
+----------------------------------
+
+Features
+========
+* Pure Python implementation
+* Python 3 support
+* Logical behavior
+
+ - Parse a time, get a `datetime.time `_
+ - Parse a date, get a `datetime.date `_
+ - Parse a datetime, get a `datetime.datetime `_
+ - Parse a duration, get a `datetime.timedelta `_
+ - Parse an interval, get a tuple of dates or datetimes
+ - Parse a repeating interval, get a date or datetime `generator `_
+
+* UTC offset represented as fixed-offset tzinfo
+* `dateutil.relativedelta `_ available for calendar accuracy
+* No regular expressions
+
+Installation
+============
+
+The recommended installation method is to use pip::
+
+ $ pip install aniso8601
+
+Alternatively, you can download the source (git repository hosted at `Bitbucket <https://bitbucket.org/nielsenb/aniso8601>`_) and install directly::
+
+ $ python setup.py install
+
+Use
+===
+
+Parsing datetimes
+-----------------
+
+To parse a typical ISO 8601 datetime string::
+
+ >>> import aniso8601
+ >>> aniso8601.parse_datetime('1977-06-10T12:00:00Z')
+ datetime.datetime(1977, 6, 10, 12, 0, tzinfo=+0:00:00 UTC)
+
+Alternative delimiters can be specified, for example, a space::
+
+ >>> aniso8601.parse_datetime('1977-06-10 12:00:00Z', delimiter=' ')
+ datetime.datetime(1977, 6, 10, 12, 0, tzinfo=+0:00:00 UTC)
+
+UTC offsets are supported::
+
+ >>> aniso8601.parse_datetime('1979-06-05T08:00:00-08:00')
+ datetime.datetime(1979, 6, 5, 8, 0, tzinfo=-8:00:00 UTC)
+
+If a UTC offset is not specified, the returned datetime will be naive::
+
+ >>> aniso8601.parse_datetime('1983-01-22T08:00:00')
+ datetime.datetime(1983, 1, 22, 8, 0)
+
+Parsing dates
+-------------
+
+To parse a date represented in an ISO 8601 string::
+
+ >>> import aniso8601
+ >>> aniso8601.parse_date('1984-04-23')
+ datetime.date(1984, 4, 23)
+
+Basic format is supported as well::
+
+ >>> aniso8601.parse_date('19840423')
+ datetime.date(1984, 4, 23)
+
+To parse a date using the ISO 8601 week date format::
+
+ >>> aniso8601.parse_date('1986-W38-1')
+ datetime.date(1986, 9, 15)
+
+To parse an ISO 8601 ordinal date::
+
+ >>> aniso8601.parse_date('1988-132')
+ datetime.date(1988, 5, 11)
+
+Parsing times
+-------------
+
+To parse a time formatted as an ISO 8601 string::
+
+ >>> import aniso8601
+ >>> aniso8601.parse_time('11:31:14')
+ datetime.time(11, 31, 14)
+
+As with all of the above, basic format is supported::
+
+ >>> aniso8601.parse_time('113114')
+ datetime.time(11, 31, 14)
+
+A UTC offset can be specified for times::
+
+ >>> aniso8601.parse_time('17:18:19-02:30')
+ datetime.time(17, 18, 19, tzinfo=-2:30:00 UTC)
+ >>> aniso8601.parse_time('171819Z')
+ datetime.time(17, 18, 19, tzinfo=+0:00:00 UTC)
+
+Reduced accuracy is supported::
+
+ >>> aniso8601.parse_time('21:42')
+ datetime.time(21, 42)
+ >>> aniso8601.parse_time('22')
+ datetime.time(22, 0)
+
+A decimal fraction is always allowed on the lowest order element of an ISO 8601 formatted time::
+
+ >>> aniso8601.parse_time('22:33.5')
+ datetime.time(22, 33, 30)
+ >>> aniso8601.parse_time('23.75')
+ datetime.time(23, 45)
+
+Leap seconds are currently not supported::
+
+ >>> aniso8601.parse_time('21:42:60')
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ File "aniso8601/time.py", line 107, in parse_time
+ return _parse_time_naive(timestr)
+ File "aniso8601/time.py", line 136, in _parse_time_naive
+ return _resolution_map[get_time_resolution(timestr)](timestr)
+ File "aniso8601/time.py", line 199, in _parse_second_time
+ raise ValueError('Seconds must be less than 60.')
+ ValueError: Seconds must be less than 60.
+
+Parsing durations
+-----------------
+
+To parse a duration formatted as an ISO 8601 string::
+
+ >>> import aniso8601
+ >>> aniso8601.parse_duration('P1Y2M3DT4H54M6S')
+ datetime.timedelta(428, 17646)
+
+Reduced accuracy is supported::
+
+ >>> aniso8601.parse_duration('P1Y')
+ datetime.timedelta(365)
+
+A decimal fraction is allowed on the lowest order element::
+
+ >>> aniso8601.parse_duration('P1YT3.5M')
+ datetime.timedelta(365, 210)
+
+The decimal fraction can be specified with a comma instead of a full-stop::
+
+ >>> aniso8601.parse_duration('P1YT3,5M')
+ datetime.timedelta(365, 210)
+
+Parsing a duration from a combined date and time is supported as well::
+
+ >>> aniso8601.parse_duration('P0001-01-02T01:30:5')
+ datetime.timedelta(397, 5405)
+
+The above treat years as 365 days and months as 30 days. If calendar level accuracy is required, the relative keyword argument can be used::
+
+ >>> import aniso8601
+ >>> from datetime import date
+ >>> one_month = aniso8601.parse_duration('P1M', relative=True)
+ >>> print one_month
+ relativedelta(months=+1)
+ >>> date(2003,1,27) + one_month
+ datetime.date(2003, 2, 27)
+ >>> date(2003,1,31) + one_month
+ datetime.date(2003, 2, 28)
+ >>> two_months = aniso8601.parse_duration('P2M', relative=True)
+ >>> date(2003,1,31) + two_months
+ datetime.date(2003, 3, 31)
+
+Since a relative fractional month or year is not logical, a ValueError is raised when attempting to parse a duration with :code:`relative=True` and fractional month or year::
+
+ >>> aniso8601.parse_duration('P2.1Y', relative=True)
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ File "aniso8601/duration.py", line 29, in parse_duration
+ return _parse_duration_prescribed(isodurationstr, relative)
+ File "aniso8601/duration.py", line 150, in _parse_duration_prescribed
+ raise ValueError('Fractional months and years are not defined for relative intervals.')
+ ValueError: Fractional months and years are not defined for relative intervals.
+
+Parsing intervals
+-----------------
+
+To parse an interval specified by a start and end::
+
+ >>> import aniso8601
+ >>> aniso8601.parse_interval('2007-03-01T13:00:00/2008-05-11T15:30:00')
+ (datetime.datetime(2007, 3, 1, 13, 0), datetime.datetime(2008, 5, 11, 15, 30))
+
+Intervals specified by a start time and a duration are supported::
+
+ >>> aniso8601.parse_interval('2007-03-01T13:00:00Z/P1Y2M10DT2H30M')
+ (datetime.datetime(2007, 3, 1, 13, 0, tzinfo=+0:00:00 UTC), datetime.datetime(2008, 5, 9, 15, 30, tzinfo=+0:00:00 UTC))
+
+A duration can also be specified by a duration and end time::
+
+ >>> aniso8601.parse_interval('P1M/1981-04-05')
+ (datetime.date(1981, 4, 5), datetime.date(1981, 3, 6))
+
+Notice that the result of the above parse is not in order from earliest to latest. If sorted intervals are required, simply use the built-in 'sorted' function as shown below::
+
+ >>> sorted(aniso8601.parse_interval('P1M/1981-04-05'))
+ [datetime.date(1981, 3, 6), datetime.date(1981, 4, 5)]
+
+The end of an interval is given as a datetime when required to maintain the resolution specified by a duration, even if the duration start is given as a date::
+
+ >>> aniso8601.parse_interval('2014-11-12/PT4H54M6.5S')
+ (datetime.date(2014, 11, 12), datetime.datetime(2014, 11, 12, 4, 54, 6, 500000))
+
+Repeating intervals are supported as well, and return a generator::
+
+ >>> aniso8601.parse_repeating_interval('R3/1981-04-05/P1D')
+ <generator object _date_generator at 0x...>
+ >>> list(aniso8601.parse_repeating_interval('R3/1981-04-05/P1D'))
+ [datetime.date(1981, 4, 5), datetime.date(1981, 4, 6), datetime.date(1981, 4, 7)]
+
+Repeating intervals are allowed to go in the reverse direction::
+
+ >>> list(aniso8601.parse_repeating_interval('R2/PT1H2M/1980-03-05T01:01:00'))
+ [datetime.datetime(1980, 3, 5, 1, 1), datetime.datetime(1980, 3, 4, 23, 59)]
+
+Unbounded intervals are also allowed (Python 2)::
+
+ >>> result = aniso8601.parse_repeating_interval('R/PT1H2M/1980-03-05T01:01:00')
+ >>> result.next()
+ datetime.datetime(1980, 3, 5, 1, 1)
+ >>> result.next()
+ datetime.datetime(1980, 3, 4, 23, 59)
+
+or for Python 3::
+
+ >>> result = aniso8601.parse_repeating_interval('R/PT1H2M/1980-03-05T01:01:00')
+ >>> next(result)
+ datetime.datetime(1980, 3, 5, 1, 1)
+ >>> next(result)
+ datetime.datetime(1980, 3, 4, 23, 59)
+
+Note that you should never try to convert a generator produced by an unbounded interval to a list::
+
+ >>> list(aniso8601.parse_repeating_interval('R/PT1H2M/1980-03-05T01:01:00'))
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ File "aniso8601/interval.py", line 156, in _date_generator_unbounded
+ currentdate += timedelta
+ OverflowError: date value out of range
+
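+If only a fixed number of dates is needed from an unbounded interval, a
+bounded slice can be taken from the generator instead, for example with
+:code:`itertools.islice` from the standard library::
+
+ >>> import itertools
+ >>> result = aniso8601.parse_repeating_interval('R/PT1H2M/1980-03-05T01:01:00')
+ >>> list(itertools.islice(result, 3))
+ [datetime.datetime(1980, 3, 5, 1, 1), datetime.datetime(1980, 3, 4, 23, 59), datetime.datetime(1980, 3, 4, 22, 57)]
+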
+The above treat years as 365 days and months as 30 days. If calendar level accuracy is required, the relative keyword argument can be used::
+
+ >>> aniso8601.parse_interval('2003-01-27/P1M', relative=True)
+ (datetime.date(2003, 1, 27), datetime.date(2003, 2, 27))
+ >>> aniso8601.parse_interval('2003-01-31/P1M', relative=True)
+ (datetime.date(2003, 1, 31), datetime.date(2003, 2, 28))
+ >>> aniso8601.parse_interval('P1Y/2001-02-28', relative=True)
+ (datetime.date(2001, 2, 28), datetime.date(2000, 2, 28))
+
+Fractional years and months do not make sense for relative intervals. A ValueError is raised when attempting to parse an interval with :code:`relative=True` and a fractional month or year::
+
+ >>> aniso8601.parse_interval('P1.1Y/2001-02-28', relative=True)
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ File "aniso8601/interval.py", line 33, in parse_interval
+ interval_parts = _parse_interval_parts(isointervalstr, intervaldelimiter, datetimedelimiter, relative)
+ File "aniso8601/interval.py", line 84, in _parse_interval_parts
+ duration = parse_duration(firstpart, relative=relative)
+ File "aniso8601/duration.py", line 29, in parse_duration
+ return _parse_duration_prescribed(isodurationstr, relative)
+ File "aniso8601/duration.py", line 150, in _parse_duration_prescribed
+ raise ValueError('Fractional months and years are not defined for relative intervals.')
+ ValueError: Fractional months and years are not defined for relative intervals.
+
+Date and time resolution
+------------------------
+
+In some situations, it may be useful to figure out the resolution provided by an ISO 8601 date or time string. Two functions are provided for this purpose.
+
+To get the resolution of a ISO 8601 time string::
+
+ >>> aniso8601.get_time_resolution('11:31:14') == aniso8601.resolution.TimeResolution.Seconds
+ True
+ >>> aniso8601.get_time_resolution('11:31') == aniso8601.resolution.TimeResolution.Minutes
+ True
+ >>> aniso8601.get_time_resolution('11') == aniso8601.resolution.TimeResolution.Hours
+ True
+
+Similarly, for an ISO 8601 date string::
+
+ >>> aniso8601.get_date_resolution('1981-04-05') == aniso8601.resolution.DateResolution.Day
+ True
+ >>> aniso8601.get_date_resolution('1981-04') == aniso8601.resolution.DateResolution.Month
+ True
+ >>> aniso8601.get_date_resolution('1981') == aniso8601.resolution.DateResolution.Year
+ True
+
+Tests
+=====
+
+To run the unit tests, navigate to the source directory and run the tests for the python version being worked on (python2, python3)::
+
+ $ python2 -m unittest discover aniso8601/tests/
+
+or::
+
+ $ python3 -m unittest discover aniso8601/tests/
+
+Contributing
+============
+
+aniso8601 is an open source project hosted on `Bitbucket <https://bitbucket.org/nielsenb/aniso8601>`_.
+
+Any and all bugs are welcome on our `issue tracker <https://bitbucket.org/nielsenb/aniso8601/issues>`_.
+Of particular interest are valid ISO 8601 strings that don't parse, or invalid ones that do. At a minimum,
+bug reports should include an example of the misbehaving string, as well as the expected result. Of course
+patches containing unit tests (or fixed bugs) are welcome!
+
+References
+==========
+
+* `ISO 8601:2004(E) `_ (Caution, PDF link)
+* `Wikipedia article on ISO 8601 `_
+* `Discussion on alternative ISO 8601 parsers for Python `_
+
+
diff --git a/appWSM/lib/python2.7/site-packages/aniso8601-1.3.0.dist-info/INSTALLER b/appWSM/lib/python2.7/site-packages/aniso8601-1.3.0.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/aniso8601-1.3.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/appWSM/lib/python2.7/site-packages/aniso8601-1.3.0.dist-info/METADATA b/appWSM/lib/python2.7/site-packages/aniso8601-1.3.0.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..52570d75ee37f832d072a712c1fa31bce2381269
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/aniso8601-1.3.0.dist-info/METADATA
@@ -0,0 +1,350 @@
+Metadata-Version: 2.0
+Name: aniso8601
+Version: 1.3.0
+Summary: A library for parsing ISO 8601 strings.
+Home-page: https://bitbucket.org/nielsenb/aniso8601
+Author: Brandon Nielsen
+Author-email: nielsenb@jetfuse.net
+License: UNKNOWN
+Description-Content-Type: UNKNOWN
+Platform: UNKNOWN
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 3
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Requires-Dist: python-dateutil
+
+===========
+ aniso8601
+===========
+
+----------------------------------
+Another ISO 8601 parser for Python
+----------------------------------
+
+Features
+========
+* Pure Python implementation
+* Python 3 support
+* Logical behavior
+
+ - Parse a time, get a `datetime.time `_
+ - Parse a date, get a `datetime.date `_
+ - Parse a datetime, get a `datetime.datetime `_
+ - Parse a duration, get a `datetime.timedelta `_
+ - Parse an interval, get a tuple of dates or datetimes
+ - Parse a repeating interval, get a date or datetime `generator `_
+
+* UTC offset represented as fixed-offset tzinfo
+* `dateutil.relativedelta `_ available for calendar accuracy
+* No regular expressions
+
+Installation
+============
+
+The recommended installation method is to use pip::
+
+ $ pip install aniso8601
+
+Alternatively, you can download the source (git repository hosted at `Bitbucket <https://bitbucket.org/nielsenb/aniso8601>`_) and install directly::
+
+ $ python setup.py install
+
+Use
+===
+
+Parsing datetimes
+-----------------
+
+To parse a typical ISO 8601 datetime string::
+
+ >>> import aniso8601
+ >>> aniso8601.parse_datetime('1977-06-10T12:00:00Z')
+ datetime.datetime(1977, 6, 10, 12, 0, tzinfo=+0:00:00 UTC)
+
+Alternative delimiters can be specified, for example, a space::
+
+ >>> aniso8601.parse_datetime('1977-06-10 12:00:00Z', delimiter=' ')
+ datetime.datetime(1977, 6, 10, 12, 0, tzinfo=+0:00:00 UTC)
+
+UTC offsets are supported::
+
+ >>> aniso8601.parse_datetime('1979-06-05T08:00:00-08:00')
+ datetime.datetime(1979, 6, 5, 8, 0, tzinfo=-8:00:00 UTC)
+
+If a UTC offset is not specified, the returned datetime will be naive::
+
+ >>> aniso8601.parse_datetime('1983-01-22T08:00:00')
+ datetime.datetime(1983, 1, 22, 8, 0)
+
+Parsing dates
+-------------
+
+To parse a date represented in an ISO 8601 string::
+
+ >>> import aniso8601
+ >>> aniso8601.parse_date('1984-04-23')
+ datetime.date(1984, 4, 23)
+
+Basic format is supported as well::
+
+ >>> aniso8601.parse_date('19840423')
+ datetime.date(1984, 4, 23)
+
+To parse a date using the ISO 8601 week date format::
+
+ >>> aniso8601.parse_date('1986-W38-1')
+ datetime.date(1986, 9, 15)
+
+To parse an ISO 8601 ordinal date::
+
+ >>> aniso8601.parse_date('1988-132')
+ datetime.date(1988, 5, 11)
+
+Parsing times
+-------------
+
+To parse a time formatted as an ISO 8601 string::
+
+ >>> import aniso8601
+ >>> aniso8601.parse_time('11:31:14')
+ datetime.time(11, 31, 14)
+
+As with all of the above, basic format is supported::
+
+ >>> aniso8601.parse_time('113114')
+ datetime.time(11, 31, 14)
+
+A UTC offset can be specified for times::
+
+ >>> aniso8601.parse_time('17:18:19-02:30')
+ datetime.time(17, 18, 19, tzinfo=-2:30:00 UTC)
+ >>> aniso8601.parse_time('171819Z')
+ datetime.time(17, 18, 19, tzinfo=+0:00:00 UTC)
+
+Reduced accuracy is supported::
+
+ >>> aniso8601.parse_time('21:42')
+ datetime.time(21, 42)
+ >>> aniso8601.parse_time('22')
+ datetime.time(22, 0)
+
+A decimal fraction is always allowed on the lowest order element of an ISO 8601 formatted time::
+
+ >>> aniso8601.parse_time('22:33.5')
+ datetime.time(22, 33, 30)
+ >>> aniso8601.parse_time('23.75')
+ datetime.time(23, 45)
+
+Leap seconds are currently not supported::
+
+ >>> aniso8601.parse_time('21:42:60')
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ File "aniso8601/time.py", line 107, in parse_time
+ return _parse_time_naive(timestr)
+ File "aniso8601/time.py", line 136, in _parse_time_naive
+ return _resolution_map[get_time_resolution(timestr)](timestr)
+ File "aniso8601/time.py", line 199, in _parse_second_time
+ raise ValueError('Seconds must be less than 60.')
+ ValueError: Seconds must be less than 60.
+
+Parsing durations
+-----------------
+
+To parse a duration formatted as an ISO 8601 string::
+
+ >>> import aniso8601
+ >>> aniso8601.parse_duration('P1Y2M3DT4H54M6S')
+ datetime.timedelta(428, 17646)
+
+Reduced accuracy is supported::
+
+ >>> aniso8601.parse_duration('P1Y')
+ datetime.timedelta(365)
+
+A decimal fraction is allowed on the lowest order element::
+
+ >>> aniso8601.parse_duration('P1YT3.5M')
+ datetime.timedelta(365, 210)
+
+The decimal fraction can be specified with a comma instead of a full-stop::
+
+ >>> aniso8601.parse_duration('P1YT3,5M')
+ datetime.timedelta(365, 210)
+
+Parsing a duration from a combined date and time is supported as well::
+
+ >>> aniso8601.parse_duration('P0001-01-02T01:30:5')
+ datetime.timedelta(397, 5405)
+
+The above treat years as 365 days and months as 30 days. If calendar level accuracy is required, the relative keyword argument can be used::
+
+ >>> import aniso8601
+ >>> from datetime import date
+ >>> one_month = aniso8601.parse_duration('P1M', relative=True)
+ >>> print one_month
+ relativedelta(months=+1)
+ >>> date(2003,1,27) + one_month
+ datetime.date(2003, 2, 27)
+ >>> date(2003,1,31) + one_month
+ datetime.date(2003, 2, 28)
+ >>> two_months = aniso8601.parse_duration('P2M', relative=True)
+ >>> date(2003,1,31) + two_months
+ datetime.date(2003, 3, 31)
+
+Since a relative fractional month or year is not logical, a ValueError is raised when attempting to parse a duration with :code:`relative=True` and fractional month or year::
+
+ >>> aniso8601.parse_duration('P2.1Y', relative=True)
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ File "aniso8601/duration.py", line 29, in parse_duration
+ return _parse_duration_prescribed(isodurationstr, relative)
+ File "aniso8601/duration.py", line 150, in _parse_duration_prescribed
+ raise ValueError('Fractional months and years are not defined for relative intervals.')
+ ValueError: Fractional months and years are not defined for relative intervals.
+
+Parsing intervals
+-----------------
+
+To parse an interval specified by a start and end::
+
+ >>> import aniso8601
+ >>> aniso8601.parse_interval('2007-03-01T13:00:00/2008-05-11T15:30:00')
+ (datetime.datetime(2007, 3, 1, 13, 0), datetime.datetime(2008, 5, 11, 15, 30))
+
+Intervals specified by a start time and a duration are supported::
+
+ >>> aniso8601.parse_interval('2007-03-01T13:00:00Z/P1Y2M10DT2H30M')
+ (datetime.datetime(2007, 3, 1, 13, 0, tzinfo=+0:00:00 UTC), datetime.datetime(2008, 5, 9, 15, 30, tzinfo=+0:00:00 UTC))
+
+A duration can also be specified by a duration and end time::
+
+ >>> aniso8601.parse_interval('P1M/1981-04-05')
+ (datetime.date(1981, 4, 5), datetime.date(1981, 3, 6))
+
+Notice that the result of the above parse is not in order from earliest to latest. If sorted intervals are required, simply use the built-in 'sorted' function as shown below::
+
+ >>> sorted(aniso8601.parse_interval('P1M/1981-04-05'))
+ [datetime.date(1981, 3, 6), datetime.date(1981, 4, 5)]
+
+The end of an interval is given as a datetime when required to maintain the resolution specified by a duration, even if the duration start is given as a date::
+
+ >>> aniso8601.parse_interval('2014-11-12/PT4H54M6.5S')
+ (datetime.date(2014, 11, 12), datetime.datetime(2014, 11, 12, 4, 54, 6, 500000))
+
+Repeating intervals are supported as well, and return a generator::
+
+ >>> aniso8601.parse_repeating_interval('R3/1981-04-05/P1D')
+ <generator object _date_generator at 0x...>
+ >>> list(aniso8601.parse_repeating_interval('R3/1981-04-05/P1D'))
+ [datetime.date(1981, 4, 5), datetime.date(1981, 4, 6), datetime.date(1981, 4, 7)]
+
+Repeating intervals are allowed to go in the reverse direction::
+
+ >>> list(aniso8601.parse_repeating_interval('R2/PT1H2M/1980-03-05T01:01:00'))
+ [datetime.datetime(1980, 3, 5, 1, 1), datetime.datetime(1980, 3, 4, 23, 59)]
+
+Unbounded intervals are also allowed (Python 2)::
+
+ >>> result = aniso8601.parse_repeating_interval('R/PT1H2M/1980-03-05T01:01:00')
+ >>> result.next()
+ datetime.datetime(1980, 3, 5, 1, 1)
+ >>> result.next()
+ datetime.datetime(1980, 3, 4, 23, 59)
+
+or for Python 3::
+
+ >>> result = aniso8601.parse_repeating_interval('R/PT1H2M/1980-03-05T01:01:00')
+ >>> next(result)
+ datetime.datetime(1980, 3, 5, 1, 1)
+ >>> next(result)
+ datetime.datetime(1980, 3, 4, 23, 59)
+
+Note that you should never try to convert a generator produced by an unbounded interval to a list::
+
+ >>> list(aniso8601.parse_repeating_interval('R/PT1H2M/1980-03-05T01:01:00'))
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ File "aniso8601/interval.py", line 156, in _date_generator_unbounded
+ currentdate += timedelta
+ OverflowError: date value out of range
+
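+If only a fixed number of dates is needed from an unbounded interval, a
+bounded slice can be taken from the generator instead, for example with
+:code:`itertools.islice` from the standard library::
+
+ >>> import itertools
+ >>> result = aniso8601.parse_repeating_interval('R/PT1H2M/1980-03-05T01:01:00')
+ >>> list(itertools.islice(result, 3))
+ [datetime.datetime(1980, 3, 5, 1, 1), datetime.datetime(1980, 3, 4, 23, 59), datetime.datetime(1980, 3, 4, 22, 57)]
+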
+The above treat years as 365 days and months as 30 days. If calendar level accuracy is required, the relative keyword argument can be used::
+
+ >>> aniso8601.parse_interval('2003-01-27/P1M', relative=True)
+ (datetime.date(2003, 1, 27), datetime.date(2003, 2, 27))
+ >>> aniso8601.parse_interval('2003-01-31/P1M', relative=True)
+ (datetime.date(2003, 1, 31), datetime.date(2003, 2, 28))
+ >>> aniso8601.parse_interval('P1Y/2001-02-28', relative=True)
+ (datetime.date(2001, 2, 28), datetime.date(2000, 2, 28))
+
+Fractional years and months do not make sense for relative intervals. A ValueError is raised when attempting to parse an interval with :code:`relative=True` and a fractional month or year::
+
+ >>> aniso8601.parse_interval('P1.1Y/2001-02-28', relative=True)
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ File "aniso8601/interval.py", line 33, in parse_interval
+ interval_parts = _parse_interval_parts(isointervalstr, intervaldelimiter, datetimedelimiter, relative)
+ File "aniso8601/interval.py", line 84, in _parse_interval_parts
+ duration = parse_duration(firstpart, relative=relative)
+ File "aniso8601/duration.py", line 29, in parse_duration
+ return _parse_duration_prescribed(isodurationstr, relative)
+ File "aniso8601/duration.py", line 150, in _parse_duration_prescribed
+ raise ValueError('Fractional months and years are not defined for relative intervals.')
+ ValueError: Fractional months and years are not defined for relative intervals.
+
+Date and time resolution
+------------------------
+
+In some situations, it may be useful to figure out the resolution provided by an ISO 8601 date or time string. Two functions are provided for this purpose.
+
+To get the resolution of a ISO 8601 time string::
+
+ >>> aniso8601.get_time_resolution('11:31:14') == aniso8601.resolution.TimeResolution.Seconds
+ True
+ >>> aniso8601.get_time_resolution('11:31') == aniso8601.resolution.TimeResolution.Minutes
+ True
+ >>> aniso8601.get_time_resolution('11') == aniso8601.resolution.TimeResolution.Hours
+ True
+
+Similarly, for an ISO 8601 date string::
+
+ >>> aniso8601.get_date_resolution('1981-04-05') == aniso8601.resolution.DateResolution.Day
+ True
+ >>> aniso8601.get_date_resolution('1981-04') == aniso8601.resolution.DateResolution.Month
+ True
+ >>> aniso8601.get_date_resolution('1981') == aniso8601.resolution.DateResolution.Year
+ True
+
+Tests
+=====
+
+To run the unit tests, navigate to the source directory and run the tests for the python version being worked on (python2, python3)::
+
+ $ python2 -m unittest discover aniso8601/tests/
+
+or::
+
+ $ python3 -m unittest discover aniso8601/tests/
+
+Contributing
+============
+
+aniso8601 is an open source project hosted on `Bitbucket <https://bitbucket.org/nielsenb/aniso8601>`_.
+
+Any and all bugs are welcome on our `issue tracker <https://bitbucket.org/nielsenb/aniso8601/issues>`_.
+Of particular interest are valid ISO 8601 strings that don't parse, or invalid ones that do. At a minimum,
+bug reports should include an example of the misbehaving string, as well as the expected result. Of course
+patches containing unit tests (or fixed bugs) are welcome!
+
+References
+==========
+
+* `ISO 8601:2004(E) `_ (Caution, PDF link)
+* `Wikipedia article on ISO 8601 `_
+* `Discussion on alternative ISO 8601 parsers for Python `_
+
+
diff --git a/appWSM/lib/python2.7/site-packages/aniso8601-1.3.0.dist-info/RECORD b/appWSM/lib/python2.7/site-packages/aniso8601-1.3.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..2c7187b0867bd2b24189e6f49e6caab73384150c
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/aniso8601-1.3.0.dist-info/RECORD
@@ -0,0 +1,23 @@
+aniso8601/__init__.py,sha256=YK7QHGyZIzxe4Lk9M5SUXEfaJ8zin40BHkZUpAkmMZQ,527
+aniso8601/compat.py,sha256=jU6lxdj_8rn4RNDUQPvr6gx_ITNys0kueYc0zksSobM,305
+aniso8601/date.py,sha256=8QpDFWGgm4YoTiXgXss7cBFlcHvEgTpe4ge0jcZMfNE,8562
+aniso8601/duration.py,sha256=HNxmAnVhs8iRlJY69CNxD5wFFDzE91GBBNO7aOJoRpM,9587
+aniso8601/interval.py,sha256=xjPzaVw7g_XZrBFGQcJ16r-8CqOGwAm9w4ktU_9-vJQ,6504
+aniso8601/resolution.py,sha256=Bz21CG3drwJWYim7i_-PB08ijHuCE5RWhoPe6fPPQ5M,422
+aniso8601/time.py,sha256=hVCid4YvmKsqhrfsx_A9nPI-1HEvDNI7sLMCP1v-vcI,7029
+aniso8601/timezone.py,sha256=LtYulQtqjP-8xiIDqxfVv26ovEru9URO0WCFef6ZBUw,4138
+aniso8601-1.3.0.dist-info/DESCRIPTION.rst,sha256=tln-FG-LXmFQRAcPZFHG7KGBZqWLTl84XhU1ecMz7dc,12168
+aniso8601-1.3.0.dist-info/METADATA,sha256=8bSsQdbETmpJMTK6VSB1sq71z9QY18FVzpLtR3S1PhU,12836
+aniso8601-1.3.0.dist-info/RECORD,,
+aniso8601-1.3.0.dist-info/WHEEL,sha256=F38j99qfcoNzHo88G-kisXWtI3MVVEd9JWjaAPcHMTc,93
+aniso8601-1.3.0.dist-info/metadata.json,sha256=PzBisshvIXRlsFDq05pOVOU5_rnFrJPhbNBIT7V0YSw,827
+aniso8601-1.3.0.dist-info/top_level.txt,sha256=MVQomyeED8nGIH7PUQdMzxgLppIB48oYHtcmL17ETB0,10
+aniso8601-1.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+aniso8601/__init__.pyc,,
+aniso8601/time.pyc,,
+aniso8601/duration.pyc,,
+aniso8601/interval.pyc,,
+aniso8601/timezone.pyc,,
+aniso8601/resolution.pyc,,
+aniso8601/date.pyc,,
+aniso8601/compat.pyc,,
diff --git a/appWSM/lib/python2.7/site-packages/aniso8601-1.3.0.dist-info/WHEEL b/appWSM/lib/python2.7/site-packages/aniso8601-1.3.0.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..1b492296b11ac941d2d468a3d241bed416ff1794
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/aniso8601-1.3.0.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.30.0)
+Root-Is-Purelib: true
+Tag: cp27-none-any
+
diff --git a/appWSM/lib/python2.7/site-packages/aniso8601-1.3.0.dist-info/metadata.json b/appWSM/lib/python2.7/site-packages/aniso8601-1.3.0.dist-info/metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..5c19bb0cf93362f478089ae4111d173b886a415c
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/aniso8601-1.3.0.dist-info/metadata.json
@@ -0,0 +1 @@
+{"classifiers": ["Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 3", "Topic :: Software Development :: Libraries :: Python Modules"], "description_content_type": "UNKNOWN", "extensions": {"python.details": {"contacts": [{"email": "nielsenb@jetfuse.net", "name": "Brandon Nielsen", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://bitbucket.org/nielsenb/aniso8601"}}}, "extras": [], "generator": "bdist_wheel (0.30.0)", "metadata_version": "2.0", "name": "aniso8601", "run_requires": [{"requires": ["python-dateutil"]}], "summary": "A library for parsing ISO 8601 strings.", "version": "1.3.0"}
\ No newline at end of file
diff --git a/appWSM/lib/python2.7/site-packages/aniso8601-1.3.0.dist-info/top_level.txt b/appWSM/lib/python2.7/site-packages/aniso8601-1.3.0.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..166ae78c59960fa87b668c4a8b5c0f7c80329aae
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/aniso8601-1.3.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+aniso8601
diff --git a/appWSM/lib/python2.7/site-packages/aniso8601/__init__.py b/appWSM/lib/python2.7/site-packages/aniso8601/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f88aef2c86e08328944fd0b748fd2df1e3e4d21
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/aniso8601/__init__.py
@@ -0,0 +1,13 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Brandon Nielsen
+# All rights reserved.
+#
+# This software may be modified and distributed under the terms
+# of the BSD license. See the LICENSE file for details.
+
+#Import the main parsing functions so they are readily available
+from aniso8601.time import parse_datetime, parse_time, get_time_resolution
+from aniso8601.date import parse_date, get_date_resolution
+from aniso8601.duration import parse_duration
+from aniso8601.interval import parse_interval, parse_repeating_interval
diff --git a/appWSM/lib/python2.7/site-packages/aniso8601/compat.py b/appWSM/lib/python2.7/site-packages/aniso8601/compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c6081465cad3868d3063ee72c7b9fea62e438c5
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/aniso8601/compat.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Brandon Nielsen
+# All rights reserved.
+#
+# This software may be modified and distributed under the terms
+# of the BSD license. See the LICENSE file for details.
+
+import sys
+
+PY2 = sys.version_info[0] == 2
+
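+#Expose a lazily-evaluated range under one name: xrange on Python 2, the
+#built-in range on Python 3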
+if PY2:
+ range = xrange
+else:
+ range = range
diff --git a/appWSM/lib/python2.7/site-packages/aniso8601/date.py b/appWSM/lib/python2.7/site-packages/aniso8601/date.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b9b713b9688f8b83793b9ecfa0403bf5f56dfe8
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/aniso8601/date.py
@@ -0,0 +1,263 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Brandon Nielsen
+# All rights reserved.
+#
+# This software may be modified and distributed under the terms
+# of the BSD license. See the LICENSE file for details.
+
+import datetime
+
+from aniso8601.resolution import DateResolution
+
+def get_date_resolution(isodatestr):
+ #Valid string formats are:
+ #
+ #Y[YYY]
+ #YYYY-MM-DD
+ #YYYYMMDD
+ #YYYY-MM
+ #YYYY-Www
+ #YYYYWww
+ #YYYY-Www-D
+ #YYYYWwwD
+ #YYYY-DDD
+ #YYYYDDD
+ if isodatestr.startswith('+') or isodatestr.startswith('-'):
+ raise NotImplementedError('ISO 8601 extended year representation not supported.')
+
+ isodatestrlen = len(isodatestr)
+
+ if isodatestr.find('W') != -1:
+ #Handle ISO 8601 week date format
+ hyphens_present = 1 if isodatestr.find('-') != -1 else 0
+ week_date_len = 7 + hyphens_present
+ weekday_date_len = 8 + 2 * hyphens_present
+
+ if isodatestrlen == week_date_len:
+ #YYYY-Www
+ #YYYYWww
+ return DateResolution.Week
+ elif isodatestrlen == weekday_date_len:
+ #YYYY-Www-D
+ #YYYYWwwD
+ return DateResolution.Weekday
+ else:
+ raise ValueError('String is not a valid ISO 8601 week date.')
+
+ #If the size of the string is 4 or less, assume it is a truncated year representation
+ if len(isodatestr) <= 4:
+ return DateResolution.Year
+
+ #An ISO string may be a calendar representation if:
+ # 1) When split on a hyphen, the sizes of the parts are 4, 2, 2 or 4, 2
+ # 2) There are no hyphens, and the length is 8
+ datestrsplit = isodatestr.split('-')
+
+ #Check case 1
+ if len(datestrsplit) == 2:
+ if len(datestrsplit[0]) == 4 and len(datestrsplit[1]) == 2:
+ return DateResolution.Month
+
+ if len(datestrsplit) == 3:
+ if len(datestrsplit[0]) == 4 and len(datestrsplit[1]) == 2 and len(datestrsplit[2]) == 2:
+ return DateResolution.Day
+
+ #Check case 2
+ if len(isodatestr) == 8 and isodatestr.find('-') == -1:
+ return DateResolution.Day
+
+ #An ISO string may be an ordinal date representation if:
+ # 1) When split on a hyphen, the sizes of the parts are 4, 3
+ # 2) There are no hyphens, and the length is 7
+
+ #Check case 1
+ if len(datestrsplit) == 2:
+ if len(datestrsplit[0]) == 4 and len(datestrsplit[1]) == 3:
+ return DateResolution.Ordinal
+
+ #Check case 2
+ if len(isodatestr) == 7 and isodatestr.find('-') == -1:
+ return DateResolution.Ordinal
+
+ #None of the date representations match
+ raise ValueError('String is not an ISO 8601 date, perhaps it represents a time or datetime.')
+
+def parse_date(isodatestr):
+ #Given a string in any ISO 8601 date format, return a datetime.date
+ #object that corresponds to the given date. Valid string formats are:
+ #
+ #Y[YYY]
+ #YYYY-MM-DD
+ #YYYYMMDD
+ #YYYY-MM
+ #YYYY-Www
+ #YYYYWww
+ #YYYY-Www-D
+ #YYYYWwwD
+ #YYYY-DDD
+ #YYYYDDD
+ #
+ #Note that the ISO 8601 date format of ±YYYYY is expressly not supported
+ return _resolution_map[get_date_resolution(isodatestr)](isodatestr)
+
+def _parse_year(yearstr):
+ #yearstr is of the format Y[YYY]
+ #
+ #0000 (1 BC) is not representable as a Python date, so a ValueError is
+ #raised
+ #
+ #Truncated dates, like '19', refer to 1900-1999 inclusive; we simply parse
+ #to 1900-01-01
+ #
+ #Since no additional resolution is provided, the month is set to 1, and
+ #day is set to 1
+
+ if len(yearstr) == 4:
+ return datetime.date(int(yearstr), 1, 1)
+ else:
+ #Pad with 0s on the right to form a complete four-digit year, e.g. '19' -> '1900'
+ return datetime.date(int(yearstr.ljust(4, '0')), 1, 1)
+
+def _parse_calendar_day(datestr):
+ #datestr is of the format YYYY-MM-DD or YYYYMMDD
+ datestrlen = len(datestr)
+
+ if datestrlen == 10:
+ #YYYY-MM-DD
+ strformat = '%Y-%m-%d'
+ elif datestrlen == 8:
+ #YYYYMMDD
+ strformat = '%Y%m%d'
+ else:
+ raise ValueError('String is not a valid ISO 8601 calendar day.')
+
+ parseddatetime = datetime.datetime.strptime(datestr, strformat)
+
+ #Since no 'time' is given, cast to a date
+ return parseddatetime.date()
+
+def _parse_calendar_month(datestr):
+ #datestr is of the format YYYY-MM
+ datestrlen = len(datestr)
+
+ if datestrlen != 7:
+ raise ValueError('String is not a valid ISO 8601 calendar month.')
+
+ parseddatetime = datetime.datetime.strptime(datestr, '%Y-%m')
+
+ #Since no 'time' is given, cast to a date
+ return parseddatetime.date()
+
+def _parse_week_day(datestr):
+ #datestr is of the format YYYY-Www-D, YYYYWwwD
+ #
+ #W is the week number prefix, ww is the week number, between 1 and 53
+ #0 is not a valid week number, which differs from the Python implementation
+ #
+ #D is the weekday number, between 1 and 7, which differs from the Python
+ #implementation which is between 0 and 6
+
+ isoyear = int(datestr[0:4])
+ gregorianyearstart = _iso_year_start(isoyear)
+
+ #Week number will be the two characters after the W
+ windex = datestr.find('W')
+ isoweeknumber = int(datestr[windex + 1:windex + 3])
+
+ if isoweeknumber == 0:
+ raise ValueError('00 is not a valid ISO 8601 weeknumber.')
+
+ datestrlen = len(datestr)
+
+ if datestr.find('-') != -1:
+ if datestrlen == 10:
+ #YYYY-Www-D
+ isoday = int(datestr[9:10])
+
+ return gregorianyearstart + datetime.timedelta(weeks=isoweeknumber - 1, days=isoday - 1)
+ else:
+ raise ValueError('String is not a valid ISO 8601 week date.')
+ else:
+ if datestrlen == 8:
+ #YYYYWwwD
+ isoday = int(datestr[7:8])
+
+ return gregorianyearstart + datetime.timedelta(weeks=isoweeknumber - 1, days=isoday - 1)
+ else:
+ raise ValueError('String is not a valid ISO 8601 week date.')
+
+def _parse_week(datestr):
+ #datestr is of the format YYYY-Www, YYYYWww
+ #
+ #W is the week number prefix, ww is the week number, between 1 and 53
+ #0 is not a valid week number, which differs from the Python implementation
+
+ isoyear = int(datestr[0:4])
+ gregorianyearstart = _iso_year_start(isoyear)
+
+ #Week number will be the two characters after the W
+ windex = datestr.find('W')
+ isoweeknumber = int(datestr[windex + 1:windex + 3])
+
+ if isoweeknumber == 0:
+ raise ValueError('00 is not a valid ISO 8601 weeknumber.')
+
+ datestrlen = len(datestr)
+
+ if datestr.find('-') != -1:
+ if datestrlen == 8:
+ #YYYY-Www
+ #Suss out the date
+ return gregorianyearstart + datetime.timedelta(weeks=isoweeknumber - 1, days=0)
+ else:
+ raise ValueError('String is not a valid ISO 8601 week date.')
+ else:
+ if datestrlen == 7:
+ #YYYYWww
+ return gregorianyearstart + datetime.timedelta(weeks=isoweeknumber - 1, days=0)
+ else:
+ raise ValueError('String is not a valid ISO 8601 week date.')
+
+def _parse_ordinal_date(datestr):
+ #datestr is of the format YYYY-DDD or YYYYDDD
+ #DDD can be from 1 - 366, matching Python's %j definition
+
+ if datestr.find('-') != -1:
+ #YYYY-DDD
+ parseddatetime = datetime.datetime.strptime(datestr, '%Y-%j')
+
+ #Since no 'time' is given, cast to a date
+ return parseddatetime.date()
+ else:
+ #YYYYDDD
+ parseddatetime = datetime.datetime.strptime(datestr, '%Y%j')
+
+ #Since no 'time' is given, cast to a date
+ return parseddatetime.date()
+
+def _iso_year_start(isoyear):
+ #Given an ISO year, returns the equivalent of the start of the year on the
+ #Gregorian calendar (which is used by Python)
+ #Stolen from:
+ #http://stackoverflow.com/questions/304256/whats-the-best-way-to-find-the-inverse-of-datetime-isocalendar
+
+ #Determine the location of the 4th of January; the first week of the ISO
+ #year is the week containing the 4th of January
+ #http://en.wikipedia.org/wiki/ISO_week_date
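+ #For example, 4 January 2004 falls on a Sunday (isoweekday 7), so the
+ #delta is 6 days and ISO year 2004 starts on Monday, 29 December 2003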
+ fourth_jan = datetime.date(isoyear, 1, 4)
+
+ #Note the conversion between ISO day (1 - 7) and Python day (0 - 6)
+ delta = datetime.timedelta(fourth_jan.isoweekday() - 1)
+
+ #Return the start of the year
+ return fourth_jan - delta
+
+_resolution_map = {
+ DateResolution.Day: _parse_calendar_day,
+ DateResolution.Ordinal: _parse_ordinal_date,
+ DateResolution.Month: _parse_calendar_month,
+ DateResolution.Week: _parse_week,
+ DateResolution.Weekday: _parse_week_day,
+ DateResolution.Year: _parse_year
+}
diff --git a/appWSM/lib/python2.7/site-packages/aniso8601/duration.py b/appWSM/lib/python2.7/site-packages/aniso8601/duration.py
new file mode 100644
index 0000000000000000000000000000000000000000..d1a1cea512a5e20a77bd841c066b8298e3c66c5d
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/aniso8601/duration.py
@@ -0,0 +1,259 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Brandon Nielsen
+# All rights reserved.
+#
+# This software may be modified and distributed under the terms
+# of the BSD license. See the LICENSE file for details.
+
+import datetime
+import dateutil.relativedelta
+
+from aniso8601.date import parse_date
+from aniso8601.time import parse_time
+from aniso8601 import compat
+
+def parse_duration(isodurationstr, relative=False):
+ #Given a string representing an ISO 8601 duration, return a
+ #datetime.timedelta (or dateutil.relativedelta.relativedelta
+ #if relative=True) that matches the given duration. Valid formats are:
+ #
+ #PnYnMnDTnHnMnS (or any reduced precision equivalent)
+ #P<date>T<time>
+
+ if isodurationstr[0] != 'P':
+ raise ValueError('ISO 8601 duration must start with a P.')
+
+ #If Y, M, D, H, S, or W are in the string, assume it is a specified duration
+ if _has_any_component(isodurationstr, ['Y', 'M', 'D', 'H', 'S', 'W']) is True:
+ return _parse_duration_prescribed(isodurationstr, relative)
+ else:
+ return _parse_duration_combined(isodurationstr, relative)
+
+def _parse_duration_prescribed(durationstr, relative):
+ #durationstr can be of the form PnYnMnDTnHnMnS or PnW
+
+ #Make sure the end character is valid
+ #https://bitbucket.org/nielsenb/aniso8601/issues/9/durations-with-trailing-garbage-are-parsed
+ if durationstr[-1] not in ['Y', 'M', 'D', 'H', 'S', 'W']:
+ raise ValueError('ISO 8601 duration must end with a valid character.')
+
+ #Make sure only the lowest order element has decimal precision
+ if durationstr.count('.') > 1:
+ raise ValueError('ISO 8601 allows only lowest order element to have a decimal fraction.')
+ elif durationstr.count('.') == 1:
+ #There should only ever be 1 letter after a decimal point; if there is
+ #more than one, the string is invalid
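+ #For example, 'P1Y2.5M' has only 'M' after the decimal point and is
+ #accepted, while 'P1.5Y2M' has both 'Y' and 'M' after it and is rejected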
+ lettercount = 0
+
+ for character in durationstr.split('.')[1]:
+ if character.isalpha() is True:
+ lettercount += 1
+
+ if lettercount > 1:
+ raise ValueError('ISO 8601 duration must end with a single valid character.')
+
+ #Do not allow W in combination with other designators
+ #https://bitbucket.org/nielsenb/aniso8601/issues/2/week-designators-should-not-be-combinable
+ if durationstr.find('W') != -1 and _has_any_component(durationstr, ['Y', 'M', 'D', 'H', 'S']) is True:
+ raise ValueError('ISO 8601 week designators may not be combined with other time designators.')
+
+ #Parse the elements of the duration
+ if durationstr.find('T') == -1:
+ #Make sure no time portion is included
+ #https://bitbucket.org/nielsenb/aniso8601/issues/7/durations-with-time-components-before-t
+ if _has_any_component(durationstr, ['H', 'S']):
+ raise ValueError('ISO 8601 time components not allowed in duration without prescribed time.')
+
+ if _component_order_correct(durationstr, ['P', 'Y', 'M', 'D', 'W']) is False:
+ raise ValueError('ISO 8601 duration components must be in the correct order.')
+
+ if durationstr.find('Y') != -1:
+ years = _parse_duration_element(durationstr, 'Y')
+ else:
+ years = 0
+
+ if durationstr.find('M') != -1:
+ months = _parse_duration_element(durationstr, 'M')
+ else:
+ months = 0
+
+ if durationstr.find('W') != -1:
+ weeks = _parse_duration_element(durationstr, 'W')
+ else:
+ weeks = 0
+
+ if durationstr.find('D') != -1:
+ days = _parse_duration_element(durationstr, 'D')
+ else:
+ days = 0
+
+ #No hours, minutes or seconds
+ hours = 0
+ minutes = 0
+ seconds = 0
+ else:
+ firsthalf = durationstr[:durationstr.find('T')]
+ secondhalf = durationstr[durationstr.find('T'):]
+
+ #Make sure no time portion is included in the date half
+ #https://bitbucket.org/nielsenb/aniso8601/issues/7/durations-with-time-components-before-t
+ if _has_any_component(firsthalf, ['H', 'S']):
+ raise ValueError('ISO 8601 time components not allowed in date portion of duration.')
+
+ if _component_order_correct(firsthalf, ['P', 'Y', 'M', 'D', 'W']) is False:
+ raise ValueError('ISO 8601 duration components must be in the correct order.')
+
+ #Make sure no date component is included in the time half
+ if _has_any_component(secondhalf, ['Y', 'D']):
+ raise ValueError('ISO 8601 time components not allowed in date portion of duration.')
+
+ if _component_order_correct(secondhalf, ['T', 'H', 'M', 'S']) is False:
+ raise ValueError('ISO 8601 time components in duration must be in the correct order.')
+
+ if firsthalf.find('Y') != -1:
+ years = _parse_duration_element(firsthalf, 'Y')
+ else:
+ years = 0
+
+ if firsthalf.find('M') != -1:
+ months = _parse_duration_element(firsthalf, 'M')
+ else:
+ months = 0
+
+ if firsthalf.find('D') != -1:
+ days = _parse_duration_element(firsthalf, 'D')
+ else:
+ days = 0
+
+ if secondhalf.find('H') != -1:
+ hours = _parse_duration_element(secondhalf, 'H')
+ else:
+ hours = 0
+
+ if secondhalf.find('M') != -1:
+ minutes = _parse_duration_element(secondhalf, 'M')
+ else:
+ minutes = 0
+
+ if secondhalf.find('S') != -1:
+ seconds = _parse_duration_element(secondhalf, 'S')
+ else:
+ seconds = 0
+
+ #Weeks can't be included
+ weeks = 0
+
+ if relative == True:
+ if int(years) != years or int(months) != months:
+ #https://github.com/dateutil/dateutil/issues/40
+ raise ValueError('Fractional months and years are not defined for relative intervals.')
+
+ return dateutil.relativedelta.relativedelta(years=int(years), months=int(months), weeks=weeks, days=days, hours=hours, minutes=minutes, seconds=seconds)
+ else:
+ #Note that weeks can be handled without conversion to days
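+ #For example, 'P1Y2M3DT4H54M6S' gives 365 + 60 + 3 = 428 days plus
+ #4 hours, 54 minutes and 6 seconds, i.e. datetime.timedelta(428, 17646)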
+ totaldays = years * 365 + months * 30 + days
+
+ return datetime.timedelta(weeks=weeks, days=totaldays, hours=hours, minutes=minutes, seconds=seconds)
+
+def _parse_duration_combined(durationstr, relative):
+ #Period of the form P<date>T<time>
+
+ #Split the string in to its component parts
+ datepart, timepart = durationstr[1:].split('T') #We skip the 'P'
+
+ datevalue = parse_date(datepart)
+ timevalue = parse_time(timepart)
+
+ if relative is True:
+ return dateutil.relativedelta.relativedelta(years=datevalue.year, months=datevalue.month, days=datevalue.day, hours=timevalue.hour, minutes=timevalue.minute, seconds=timevalue.second, microseconds=timevalue.microsecond)
+ else:
+ totaldays = datevalue.year * 365 + datevalue.month * 30 + datevalue.day
+
+ return datetime.timedelta(days=totaldays, hours=timevalue.hour, minutes=timevalue.minute, seconds=timevalue.second, microseconds=timevalue.microsecond)
+
+def _parse_duration_element(durationstr, elementstr):
+ #Extracts the specified portion of a duration, for instance, given:
+ #durationstr = 'T4H5M6.1234S'
+ #elementstr = 'H'
+ #
+ #returns 4
+ #
+ #Note that the string must start with a character, so it's assumed the
+ #full duration string would be split at the 'T'
+
+ durationstartindex = 0
+ durationendindex = durationstr.find(elementstr)
+
+ for characterindex in compat.range(durationendindex - 1, 0, -1):
+ if durationstr[characterindex].isalpha() is True:
+ durationstartindex = characterindex
+ break
+
+ durationstartindex += 1
+
+ if ',' in durationstr:
+ #Replace the comma with a 'full-stop'
+ durationstr = durationstr.replace(',', '.')
+
+ return float(durationstr[durationstartindex:durationendindex])
+
+def _has_any_component(durationstr, components):
+ #Given a duration string, and a list of components, returns True
+ #if any of the listed components are present, False otherwise.
+ #
+ #For instance:
+ #durationstr = 'P1Y'
+ #components = ['Y', 'M']
+ #
+ #returns True
+ #
+ #durationstr = 'P1Y'
+ #components = ['M', 'D']
+ #
+ #returns False
+
+ for component in components:
+ if durationstr.find(component) != -1:
+ return True
+
+ return False
+
+def _component_order_correct(durationstr, componentorder):
+ #Given a duration string, and a list of components, returns
+ #True if the components are in the same order as the
+ #component order list, False otherwise. Characters that
+ #are present in the component order list but not in the
+ #duration string are ignored.
+ #
+ #https://bitbucket.org/nielsenb/aniso8601/issues/8/durations-with-components-in-wrong-order
+ #
+ #durationstr = 'P1Y1M1D'
+ #components = ['P', 'Y', 'M', 'D']
+ #
+ #returns True
+ #
+ #durationstr = 'P1Y1M'
+ #components = ['P', 'Y', 'M', 'D']
+ #
+ #returns True
+ #
+ #durationstr = 'P1D1Y1M'
+ #components = ['P', 'Y', 'M', 'D']
+ #
+ #returns False
+
+ componentindex = 0
+
+ for characterindex in compat.range(len(durationstr)):
+ character = durationstr[characterindex]
+
+ if character in componentorder:
+ #This is a character we need to check the order of
+ if character in componentorder[componentindex:]:
+ componentindex = componentorder.index(character)
+ else:
+ #A character is out of order
+ return False
+
+ return True
diff --git a/appWSM/lib/python2.7/site-packages/aniso8601/interval.py b/appWSM/lib/python2.7/site-packages/aniso8601/interval.py
new file mode 100644
index 0000000000000000000000000000000000000000..838f6e9f35885d83327d86a1fffa572ef84a90ee
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/aniso8601/interval.py
@@ -0,0 +1,156 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Brandon Nielsen
+# All rights reserved.
+#
+# This software may be modified and distributed under the terms
+# of the BSD license. See the LICENSE file for details.
+
+from datetime import datetime
+from aniso8601.duration import parse_duration
+from aniso8601.time import parse_datetime
+from aniso8601.date import parse_date
+
+def parse_interval(isointervalstr, intervaldelimiter='/', datetimedelimiter='T', relative=False):
+ #Given a string representing an ISO 8601 interval, return a
+ #tuple of datetime.date or datetime.datetime objects representing the beginning
+ #and end of the specified interval. Valid formats are:
+ #
+ #<start>/<end>
+ #<start>/<duration>
+ #<duration>/<end>
+ #
+ #The <start> and <end> values can represent dates, or datetimes,
+ #not times.
+ #
+ #The format:
+ #
+ #<duration>
+ #
+ #Is expressly not supported as there is no way to provide the additional
+ #required context.
+
+ interval_parts = _parse_interval_parts(isointervalstr, intervaldelimiter, datetimedelimiter, relative)
+
+ return (interval_parts[0], interval_parts[1])
+
+def parse_repeating_interval(isointervalstr, intervaldelimiter='/', datetimedelimiter='T', relative=False):
+ #Given a string representing an ISO 8601 repeating interval, return a
+ #generator of datetime.date or datetime.datetime objects representing the
+ #dates specified by the repeating interval. Valid formats are:
+ #
+ #Rnn/<interval>
+ #R/<interval>
+
+ if isointervalstr[0] != 'R':
+ raise ValueError('ISO 8601 repeating interval must start with an R.')
+
+ #Parse the number of iterations
+ iterationpart, intervalpart = isointervalstr.split(intervaldelimiter, 1)
+
+ if len(iterationpart) > 1:
+ iterations = int(iterationpart[1:])
+ else:
+ iterations = None
+
+ interval_parts = _parse_interval_parts(intervalpart, intervaldelimiter, datetimedelimiter, relative=relative)
+
+ #Now, build and return the generator
+ if iterations is not None:
+ return _date_generator(interval_parts[0], interval_parts[2], iterations)
+ else:
+ return _date_generator_unbounded(interval_parts[0], interval_parts[2])
+
+def _parse_interval_parts(isointervalstr, intervaldelimiter='/', datetimedelimiter='T', relative=False):
+ #Returns a tuple containing the start of the interval, the end of the interval, and the interval timedelta
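+ #For example, '1981-04-05/1981-04-10' (both parts parsed as dates) yields
+ #(datetime.date(1981, 4, 5), datetime.date(1981, 4, 10), datetime.timedelta(5))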
+ firstpart, secondpart = isointervalstr.split(intervaldelimiter)
+
+ if firstpart[0] == 'P':
+ #<duration>/<end>
+ #Notice that these are not returned 'in order' (earlier to later); this
+ #is to maintain consistency with parsing <start>/<duration> intervals, as
+ #well as making repeating interval code cleaner. Users who desire
+ #durations to be in order can use the 'sorted' builtin.
+
+ #We need to figure out if <end> is a date, or a datetime
+ if secondpart.find(datetimedelimiter) != -1:
+ #<end> is a datetime
+ duration = parse_duration(firstpart, relative=relative)
+ enddatetime = parse_datetime(secondpart, delimiter=datetimedelimiter)
+
+ return (enddatetime, enddatetime - duration, -duration)
+ else:
+ #<end> must just be a date
+ duration = parse_duration(firstpart, relative=relative)
+ enddate = parse_date(secondpart)
+
+ #See if we need to upconvert to datetime to preserve resolution
+ if firstpart.find(datetimedelimiter) != -1:
+ return (enddate, datetime.combine(enddate, datetime.min.time()) - duration, -duration)
+ else:
+ return (enddate, enddate - duration, -duration)
+ elif secondpart[0] == 'P':
+ #<start>/<duration>
+ #We need to figure out if <start> is a date, or a datetime
+ if firstpart.find(datetimedelimiter) != -1:
+ #<start> is a datetime
+ duration = parse_duration(secondpart, relative=relative)
+ startdatetime = parse_datetime(firstpart, delimiter=datetimedelimiter)
+
+ return (startdatetime, startdatetime + duration, duration)
+ else:
+ #<start> must just be a date
+ duration = parse_duration(secondpart, relative=relative)
+ startdate = parse_date(firstpart)
+
+ #See if we need to upconvert to datetime to preserve resolution
+ if secondpart.find(datetimedelimiter) != -1:
+ return (startdate, datetime.combine(startdate, datetime.min.time()) + duration, duration)
+ else:
+ return (startdate, startdate + duration, duration)
+ else:
+ #<start>/<end>
+ if firstpart.find(datetimedelimiter) != -1 and secondpart.find(datetimedelimiter) != -1:
+ #Both parts are datetimes
+ start_datetime = parse_datetime(firstpart, delimiter=datetimedelimiter)
+ end_datetime = parse_datetime(secondpart, delimiter=datetimedelimiter)
+
+ return (start_datetime, end_datetime, end_datetime - start_datetime)
+ elif firstpart.find(datetimedelimiter) != -1 and secondpart.find(datetimedelimiter) == -1:
+ #First part is a datetime, second part is a date
+ start_datetime = parse_datetime(firstpart, delimiter=datetimedelimiter)
+ end_date = parse_date(secondpart)
+
+ return (start_datetime, end_date, datetime.combine(end_date, datetime.min.time()) - start_datetime)
+ elif firstpart.find(datetimedelimiter) == -1 and secondpart.find(datetimedelimiter) != -1:
+ #First part is a date, second part is a datetime
+ start_date = parse_date(firstpart)
+ end_datetime = parse_datetime(secondpart, delimiter=datetimedelimiter)
+
+ return (start_date, end_datetime, end_datetime - datetime.combine(start_date, datetime.min.time()))
+ else:
+ #Both parts are dates
+ start_date = parse_date(firstpart)
+ end_date = parse_date(secondpart)
+
+ return (start_date, end_date, end_date - start_date)
+
+def _date_generator(startdate, timedelta, iterations):
+ currentdate = startdate
+ currentiteration = 0
+
+ while currentiteration < iterations:
+ yield currentdate
+
+ #Update the values
+ currentdate += timedelta
+ currentiteration += 1
+
+def _date_generator_unbounded(startdate, timedelta):
+ currentdate = startdate
+
+ while True:
+ yield currentdate
+
+ #Update the value
+ currentdate += timedelta
diff --git a/appWSM/lib/python2.7/site-packages/aniso8601/resolution.py b/appWSM/lib/python2.7/site-packages/aniso8601/resolution.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec0685daf09398cd6395ee99a63ebf422d6d0dbd
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/aniso8601/resolution.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Brandon Nielsen
+# All rights reserved.
+#
+# This software may be modified and distributed under the terms
+# of the BSD license. See the LICENSE file for details.
+
+from aniso8601 import compat
+
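+#Enum-style constants describing how much resolution an ISO 8601 string
+#provides, e.g. DateResolution.Year == 0 and DateResolution.Ordinal == 5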
+class DateResolution(object):
+ Year, Month, Week, Weekday, Day, Ordinal = list(compat.range(6))
+
+class TimeResolution(object):
+ Seconds, Minutes, Hours = list(compat.range(3))
diff --git a/appWSM/lib/python2.7/site-packages/aniso8601/time.py b/appWSM/lib/python2.7/site-packages/aniso8601/time.py
new file mode 100644
index 0000000000000000000000000000000000000000..efeb7efadca7a72094a967ddac9720d7a978cd57
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/aniso8601/time.py
@@ -0,0 +1,235 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Brandon Nielsen
+# All rights reserved.
+#
+# This software may be modified and distributed under the terms
+# of the BSD license. See the LICENSE file for details.
+
+import datetime
+
+from aniso8601.timezone import parse_timezone, build_utcoffset
+from aniso8601.date import parse_date
+from aniso8601.resolution import TimeResolution
+
+def get_time_resolution(isotimestr):
+ #Valid time formats are:
+ #
+ #hh:mm:ss
+ #hhmmss
+ #hh:mm
+ #hhmm
+ #hh
+ #hh:mm:ssZ
+ #hhmmssZ
+ #hh:mmZ
+ #hhmmZ
+ #hhZ
+ #hh:mm:ss±hh:mm
+ #hhmmss±hh:mm
+ #hh:mm±hh:mm
+ #hhmm±hh:mm
+ #hh±hh:mm
+ #hh:mm:ss±hhmm
+ #hhmmss±hhmm
+ #hh:mm±hhmm
+ #hhmm±hhmm
+ #hh±hhmm
+ #hh:mm:ss±hh
+ #hhmmss±hh
+ #hh:mm±hh
+ #hhmm±hh
+ #hh±hh
+
+ timestr = _split_tz(isotimestr)[0]
+
+ if timestr.count(':') == 2:
+ #hh:mm:ss
+ return TimeResolution.Seconds
+ elif timestr.count(':') == 1:
+ #hh:mm
+ return TimeResolution.Minutes
+
+ #Format must be hhmmss, hhmm, or hh
+ if timestr.find('.') == -1:
+ #No time fractions
+ timestrlen = len(timestr)
+ else:
+ #The lowest order element is a fraction
+ timestrlen = len(timestr.split('.')[0])
+
+ if timestrlen == 6:
+ #hhmmss
+ return TimeResolution.Seconds
+ elif timestrlen == 4:
+ #hhmm
+ return TimeResolution.Minutes
+ elif timestrlen == 2:
+ #hh
+ return TimeResolution.Hours
+ else:
+ raise ValueError('String is not a valid ISO 8601 time.')
+
+def parse_time(isotimestr):
+ #Given a string in any ISO 8601 time format, return a datetime.time object
+ #that corresponds to the given time. Fixed offset tzdata will be included
+ #if UTC offset is given in the input string. Valid time formats are:
+ #
+ #hh:mm:ss
+ #hhmmss
+ #hh:mm
+ #hhmm
+ #hh
+ #hh:mm:ssZ
+ #hhmmssZ
+ #hh:mmZ
+ #hhmmZ
+ #hhZ
+ #hh:mm:ss±hh:mm
+ #hhmmss±hh:mm
+ #hh:mm±hh:mm
+ #hhmm±hh:mm
+ #hh±hh:mm
+ #hh:mm:ss±hhmm
+ #hhmmss±hhmm
+ #hh:mm±hhmm
+ #hhmm±hhmm
+ #hh±hhmm
+ #hh:mm:ss±hh
+ #hhmmss±hh
+ #hh:mm±hh
+ #hhmm±hh
+ #hh±hh
+
+ (timestr, tzstr) = _split_tz(isotimestr)
+
+ if tzstr is None:
+ return _parse_time_naive(timestr)
+ elif tzstr == 'Z':
+ return _parse_time_naive(timestr).replace(tzinfo=build_utcoffset('UTC', datetime.timedelta(hours=0)))
+ else:
+ return _parse_time_naive(timestr).replace(tzinfo=parse_timezone(tzstr))
+
+def parse_datetime(isodatetimestr, delimiter='T'):
+ #Given a string in ISO 8601 date time format, return a datetime.datetime
+ #object that corresponds to the given date time.
+ #By default, the ISO 8601 specified T delimiter is used to split the
+ #date and time (T). Fixed offset tzdata will be included
+ #if UTC offset is given in the input string.
+
+ isodatestr, isotimestr = isodatetimestr.split(delimiter)
+
+ datepart = parse_date(isodatestr)
+ timepart = parse_time(isotimestr)
+
+ return datetime.datetime.combine(datepart, timepart)
+
+def _parse_time_naive(timestr):
+ #timestr is of the format hh:mm:ss, hh:mm, hhmmss, hhmm, hh
+ #
+ #hh is between 0 and 24; since 24 is not allowed in the Python time format
+ #and represents midnight, a time of 00:00:00 is returned for hour 24
+ #
+ #mm is between 0 and 60, with 60 used to denote a leap second
+ #
+ #No tzinfo will be included
+ return _resolution_map[get_time_resolution(timestr)](timestr)
+
+def _parse_hour(timestr):
+ #Format must be hh or hh.
+ isohour = float(timestr)
+
+ if isohour == 24:
+ return datetime.time(hour=0, minute=0)
+
+ #Since the time constructor doesn't handle fractional hours, we put
+ #the hours into a timedelta and add it to the time before returning
+ hoursdelta = datetime.timedelta(hours=isohour)
+
+ return _build_time(datetime.time(hour=0), hoursdelta)
+
+def _parse_minute_time(timestr):
+ #Format must be hhmm, hhmm., hh:mm or hh:mm.
+ if timestr.count(':') == 1:
+ #hh:mm or hh:mm.
+ timestrarray = timestr.split(':')
+
+ isohour = int(timestrarray[0])
+ isominute = float(timestrarray[1]) #Minute may now be a fraction
+ else:
+ #hhmm or hhmm.
+ isohour = int(timestr[0:2])
+ isominute = float(timestr[2:])
+
+ if isominute > 60:
+ raise ValueError('ISO 8601 minute element cannot be greater than 60.')
+
+ if isohour == 24:
+ return datetime.time(hour=0, minute=0)
+
+ #Since the time constructor doesn't handle fractional minutes, we put
+ #the minutes into a timedelta and add it to the time before returning
+ minutesdelta = datetime.timedelta(minutes = isominute)
+
+ return _build_time(datetime.time(hour=isohour), minutesdelta)
+
+def _parse_second_time(timestr):
+ #Format must be hhmmss, hhmmss., hh:mm:ss or hh:mm:ss.
+ if timestr.count(':') == 2:
+ #hh:mm:ss or hh:mm:ss.
+ timestrarray = timestr.split(':')
+
+ isohour = int(timestrarray[0])
+ isominute = int(timestrarray[1])
+
+ #Since the time constructor doesn't handle fractional seconds, we put
+ #the seconds into a timedelta and add it to the time before returning
+ secondsdelta = datetime.timedelta(seconds = float(timestrarray[2]))
+ else:
+ #hhmmss or hhmmss.
+ isohour = int(timestr[0:2])
+ isominute = int(timestr[2:4])
+
+ #Since the time constructor doesn't handle fractional seconds, we put
+ #the seconds into a timedelta and add it to the time before returning
+ secondsdelta = datetime.timedelta(seconds = float(timestr[4:]))
+
+ if secondsdelta.seconds >= 60:
+ #https://bitbucket.org/nielsenb/aniso8601/issues/13/parsing-of-leap-second-gives-wildly
+ raise ValueError('Seconds must be less than 60.')
+
+ if isominute > 60:
+ raise ValueError('ISO 8601 minute element cannot be greater than 60.')
+
+ if isohour == 24:
+ return datetime.time(hour=0, minute=0)
+
+ return _build_time(datetime.time(hour=isohour, minute=isominute),
+ secondsdelta)
+
+def _build_time(time, delta):
+ #Combine today's date (just so we have a date object), the time, the
+ #delta, and return the time component
+ base_datetime = datetime.datetime.combine(datetime.date.today(), time)
+ return (base_datetime + delta).time()
+
+def _split_tz(isotimestr):
+ if isotimestr.find('+') != -1:
+ timestr = isotimestr[0:isotimestr.find('+')]
+ tzstr = isotimestr[isotimestr.find('+'):]
+ elif isotimestr.find('-') != -1:
+ timestr = isotimestr[0:isotimestr.find('-')]
+ tzstr = isotimestr[isotimestr.find('-'):]
+ elif isotimestr.endswith('Z'):
+ timestr = isotimestr[:-1]
+ tzstr = 'Z'
+ else:
+ timestr = isotimestr
+ tzstr = None
+ return (timestr, tzstr)
+
+_resolution_map = {
+ TimeResolution.Hours: _parse_hour,
+ TimeResolution.Minutes: _parse_minute_time,
+ TimeResolution.Seconds: _parse_second_time
+}
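+
+
+#Editor's note: a minimal, hedged usage sketch appended for illustration only;
+#it is not part of the aniso8601 distribution. It assumes the package is
+#importable (e.g. run via `python -m aniso8601.time`) and exercises the
+#parse_time / parse_datetime helpers defined above on example timestamps.
+if __name__ == '__main__':
+    print(parse_time('11:30:15'))              #11:30:15
+    print(parse_time('113015.5+02:00'))        #11:30:15.500000 at a +02:00 offset
+    print(parse_datetime('2016-03-04T11:30'))  #2016-03-04 11:30:00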
diff --git a/appWSM/lib/python2.7/site-packages/aniso8601/timezone.py b/appWSM/lib/python2.7/site-packages/aniso8601/timezone.py
new file mode 100644
index 0000000000000000000000000000000000000000..1f5ee151395fa3bbcc1552f94c78d459e7ca5ced
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/aniso8601/timezone.py
@@ -0,0 +1,112 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Brandon Nielsen
+# All rights reserved.
+#
+# This software may be modified and distributed under the terms
+# of the BSD license. See the LICENSE file for details.
+
+import datetime
+
+def parse_timezone(tzstr):
+ #tzstr can be ±hh:mm, ±hhmm, ±hh, the Z case is handled elsewhere
+
+ tzstrlen = len(tzstr)
+
+ if tzstrlen == 6:
+ #±hh:mm
+ tzhour = int(tzstr[1:3])
+ tzminute = int(tzstr[4:6])
+
+ if tzstr[0] == '+':
+ return build_utcoffset(tzstr, datetime.timedelta(hours=tzhour, minutes=tzminute))
+ else:
+ if tzhour == 0 and tzminute == 0:
+ raise ValueError('Negative ISO 8601 time offset cannot be 0.')
+ else:
+ return build_utcoffset(tzstr, -datetime.timedelta(hours=tzhour, minutes=tzminute))
+ elif tzstrlen == 5:
+ #±hhmm
+ tzhour = int(tzstr[1:3])
+ tzminute = int(tzstr[3:5])
+
+ if tzstr[0] == '+':
+ return build_utcoffset(tzstr, datetime.timedelta(hours=tzhour, minutes=tzminute))
+ else:
+ if tzhour == 0 and tzminute == 0:
+ raise ValueError('Negative ISO 8601 time offset cannot be 0.')
+ else:
+ return build_utcoffset(tzstr, -datetime.timedelta(hours=tzhour, minutes=tzminute))
+ elif tzstrlen == 3:
+ #±hh
+ tzhour = int(tzstr[1:3])
+
+ if tzstr[0] == '+':
+ return build_utcoffset(tzstr, datetime.timedelta(hours=tzhour))
+ else:
+ if tzhour == 0:
+ raise ValueError('Negative ISO 8601 time offset cannot be 0.')
+ else:
+ return build_utcoffset(tzstr, -datetime.timedelta(hours=tzhour))
+ else:
+ raise ValueError('String is not a valid ISO 8601 time offset.')
+
+def build_utcoffset(name, utcdelta):
+ #We build an offset in this manner because a tzinfo subclass must have
+ #an __init__ "that can be called with no arguments"
+
+ returnoffset = UTCOffset()
+
+ returnoffset.setname(name)
+ returnoffset.setutcdelta(utcdelta)
+
+ return returnoffset
+
+class UTCOffset(datetime.tzinfo):
+ def __repr__(self):
+ if self._utcdelta >= datetime.timedelta(hours=0):
+ return '+{0} UTC'.format(self._utcdelta)
+ else:
+ #From the docs:
+ #String representations of timedelta objects are normalized
+ #similarly to their internal representation. This leads to
+ #somewhat unusual results for negative timedeltas.
+ #
+ #Clean this up for printing purposes
+ correctedDays = abs(self._utcdelta.days + 1) #Negative deltas start at -1 day
+
+ deltaSeconds = (24 * 60 * 60) - self._utcdelta.seconds #Negative deltas have a positive seconds
+
+ days, remainder = divmod(deltaSeconds, 24 * 60 * 60) #(24 hours / day) * (60 minutes / hour) * (60 seconds / hour)
+ hours, remainder = divmod(remainder, 1 * 60 * 60) #(1 hour) * (60 minutes / hour) * (60 seconds / hour)
+ minutes, seconds = divmod(remainder, 1 * 60) #(1 minute) * (60 seconds / minute)
+
+ #Add any remaining days to the correctedDays count
+ correctedDays += days
+
+ if correctedDays == 0:
+ return '-{0}:{1:02}:{2:02} UTC'.format(hours, minutes, seconds)
+ else:
+ if correctedDays == 1:
+ return '-1 day, {0}:{1:02}:{2:02} UTC'.format(hours, minutes, seconds)
+ else:
+ return '-{0} days, {1}:{2:02}:{3:02} UTC'.format(correctedDays, hours, minutes, seconds)
+
+ def setname(self, name):
+ self._name = name
+
+ def setutcdelta(self, utcdelta):
+ self._utcdelta = utcdelta
+
+ def utcoffset(self, dt):
+ return self._utcdelta
+
+ def tzname(self, dt):
+ return self._name
+
+ def dst(self, dt):
+ #ISO 8601 specifies offsets should be different if DST is required,
+ #instead of allowing for a DST to be specified
+ # https://docs.python.org/2/library/datetime.html#datetime.tzinfo.dst
+ return datetime.timedelta(0)
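+
+
+#Editor's note: a hedged usage sketch, not part of the aniso8601 distribution.
+#It assumes the module is importable (e.g. `python -m aniso8601.timezone`) and
+#prints the UTCOffset objects produced by parse_timezone() above.
+if __name__ == '__main__':
+    for offsetstr in ('+05:30', '-0800', '+02'):
+        tz = parse_timezone(offsetstr)
+        print(offsetstr, repr(tz), tz.utcoffset(None))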
diff --git a/appWSM/lib/python2.7/site-packages/click-6.7.dist-info/DESCRIPTION.rst b/appWSM/lib/python2.7/site-packages/click-6.7.dist-info/DESCRIPTION.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e1187231a3553e6c1bec45c6b5a4e4e62b8ef70d
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/click-6.7.dist-info/DESCRIPTION.rst
@@ -0,0 +1,3 @@
+UNKNOWN
+
+
diff --git a/appWSM/lib/python2.7/site-packages/click-6.7.dist-info/INSTALLER b/appWSM/lib/python2.7/site-packages/click-6.7.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/click-6.7.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/appWSM/lib/python2.7/site-packages/click-6.7.dist-info/METADATA b/appWSM/lib/python2.7/site-packages/click-6.7.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..1f108853b25cb3299386cea179c6544712320afd
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/click-6.7.dist-info/METADATA
@@ -0,0 +1,16 @@
+Metadata-Version: 2.0
+Name: click
+Version: 6.7
+Summary: A simple wrapper around optparse for powerful command line utilities.
+Home-page: http://github.com/mitsuhiko/click
+Author: Armin Ronacher
+Author-email: armin.ronacher@active-4.com
+License: UNKNOWN
+Platform: UNKNOWN
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+
+UNKNOWN
+
+
diff --git a/appWSM/lib/python2.7/site-packages/click-6.7.dist-info/RECORD b/appWSM/lib/python2.7/site-packages/click-6.7.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..fd18d4edfe38cf543467cd20f95d2bebbae36f54
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/click-6.7.dist-info/RECORD
@@ -0,0 +1,41 @@
+click/__init__.py,sha256=k8R00cFKWI8dhDVKQeLBlAdNh1CxerMEDRiGnr32gdw,2858
+click/_bashcomplete.py,sha256=82rMiibtEurdwBq60NHXVCBuGXJHDpblFO9o2YxJDF0,2423
+click/_compat.py,sha256=j59MpzxYGE-fTGj0A5sg8UI8GhHod1XMojiCA0jvbL0,21011
+click/_termui_impl.py,sha256=Ol1JJhvBRw3l8j1WIU0tOWjQtxxmwGE44lFDbzDqzoA,16395
+click/_textwrap.py,sha256=gwS4m7bdQiJnzaDG8osFcRb-5vn4t4l2qSCy-5csCEc,1198
+click/_unicodefun.py,sha256=A3UOzJw6lEZyol2SBg3fNXgweTutaOzkJ61OB7vik3Y,4204
+click/_winconsole.py,sha256=MzG46DEYPoRyx4SO7EIhFuFZHESgooAfJLIukbB6p5c,7790
+click/core.py,sha256=M0nJ6Kkye7XZXYG7HCbkJWSfy14WHV6bQmGLACrOhKw,70254
+click/decorators.py,sha256=y7CX2needh8iRWafj-QS_hGQFsN24eyXAhx5Y2ATwas,10941
+click/exceptions.py,sha256=rOa0pP3PbSy0_AAPOW9irBEM8AJ3BySN-4z2VUwFVo4,6788
+click/formatting.py,sha256=eh-cypTUAhpI3HD-K4ZpR3vCiURIO62xXvKkR3tNUTM,8889
+click/globals.py,sha256=PAgnKvGxq4YuEIldw3lgYOGBLYwsyxnm1IByBX3BFXo,1515
+click/parser.py,sha256=i01xgYuIA6AwQWEXjshwHSwnTR3gUep4FxJIfyW4ta4,15510
+click/termui.py,sha256=Bp99MSWQtyoWe1_7HggDmA77n--3KLxu7NsZMFMaCUo,21008
+click/testing.py,sha256=kJ9mjtJgwNAlkgKcFf9-ISxufmaPDbbuOHVC9WIvKdY,11002
+click/types.py,sha256=ZGb2lmFs5Vwd9loTRIMbGcqhPVOql8mGoBhWBRT6V4E,18864
+click/utils.py,sha256=1jalPlkUU28JReTEQeeSFtbJd-SirYWBNfjtELBKzT4,14916
+click-6.7.dist-info/DESCRIPTION.rst,sha256=OCTuuN6LcWulhHS3d5rfjdsQtW22n7HENFRh6jC6ego,10
+click-6.7.dist-info/METADATA,sha256=l6lAyogIUXiHKUK_rWguef-EMcvO5C6bXzFCNCcblbQ,424
+click-6.7.dist-info/RECORD,,
+click-6.7.dist-info/WHEEL,sha256=5wvfB7GvgZAbKBSE9uX9Zbi6LCL-_KgezgHblXhCRnM,113
+click-6.7.dist-info/metadata.json,sha256=qg0uO6amNHkIkOxnmWX7Xa_DNQMQ62Q6drivuP9Gh1c,571
+click-6.7.dist-info/top_level.txt,sha256=J1ZQogalYS4pphY_lPECoNMfw0HzTSrZglC4Yfwo4xA,6
+click-6.7.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+click/exceptions.pyc,,
+click/_unicodefun.pyc,,
+click/_compat.pyc,,
+click/_textwrap.pyc,,
+click/formatting.pyc,,
+click/decorators.pyc,,
+click/_bashcomplete.pyc,,
+click/parser.pyc,,
+click/testing.pyc,,
+click/utils.pyc,,
+click/types.pyc,,
+click/termui.pyc,,
+click/core.pyc,,
+click/_winconsole.pyc,,
+click/__init__.pyc,,
+click/_termui_impl.pyc,,
+click/globals.pyc,,
diff --git a/appWSM/lib/python2.7/site-packages/click-6.7.dist-info/WHEEL b/appWSM/lib/python2.7/site-packages/click-6.7.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..7bf9daa1ada4a6a6a9bfbf07f3b1a9f84a71bb32
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/click-6.7.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.30.0.a0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/appWSM/lib/python2.7/site-packages/click-6.7.dist-info/metadata.json b/appWSM/lib/python2.7/site-packages/click-6.7.dist-info/metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..0a4cfb1898becf162890c07a372be9fa23a150fe
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/click-6.7.dist-info/metadata.json
@@ -0,0 +1 @@
+{"classifiers": ["License :: OSI Approved :: BSD License", "Programming Language :: Python", "Programming Language :: Python :: 3"], "extensions": {"python.details": {"contacts": [{"email": "armin.ronacher@active-4.com", "name": "Armin Ronacher", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "http://github.com/mitsuhiko/click"}}}, "generator": "bdist_wheel (0.30.0.a0)", "metadata_version": "2.0", "name": "click", "summary": "A simple wrapper around optparse for powerful command line utilities.", "version": "6.7"}
\ No newline at end of file
diff --git a/appWSM/lib/python2.7/site-packages/click-6.7.dist-info/top_level.txt b/appWSM/lib/python2.7/site-packages/click-6.7.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..dca9a909647e3b066931de2909c2d1e65c78c995
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/click-6.7.dist-info/top_level.txt
@@ -0,0 +1 @@
+click
diff --git a/appWSM/lib/python2.7/site-packages/click/__init__.py b/appWSM/lib/python2.7/site-packages/click/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..971e55d0a8c6dea5087eeb99cf2cda69823ee470
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/click/__init__.py
@@ -0,0 +1,98 @@
+# -*- coding: utf-8 -*-
+"""
+ click
+ ~~~~~
+
+ Click is a simple Python module that wraps the stdlib's optparse to make
+ writing command line scripts fun. Unlike other modules, it's based around
+ a simple API that does not come with too much magic and is composable.
+
+ In case optparse ever gets removed from the stdlib, it will be shipped by
+ this module.
+
+ :copyright: (c) 2014 by Armin Ronacher.
+ :license: BSD, see LICENSE for more details.
+"""
+
+# Core classes
+from .core import Context, BaseCommand, Command, MultiCommand, Group, \
+ CommandCollection, Parameter, Option, Argument
+
+# Globals
+from .globals import get_current_context
+
+# Decorators
+from .decorators import pass_context, pass_obj, make_pass_decorator, \
+ command, group, argument, option, confirmation_option, \
+ password_option, version_option, help_option
+
+# Types
+from .types import ParamType, File, Path, Choice, IntRange, Tuple, \
+ STRING, INT, FLOAT, BOOL, UUID, UNPROCESSED
+
+# Utilities
+from .utils import echo, get_binary_stream, get_text_stream, open_file, \
+ format_filename, get_app_dir, get_os_args
+
+# Terminal functions
+from .termui import prompt, confirm, get_terminal_size, echo_via_pager, \
+ progressbar, clear, style, unstyle, secho, edit, launch, getchar, \
+ pause
+
+# Exceptions
+from .exceptions import ClickException, UsageError, BadParameter, \
+ FileError, Abort, NoSuchOption, BadOptionUsage, BadArgumentUsage, \
+ MissingParameter
+
+# Formatting
+from .formatting import HelpFormatter, wrap_text
+
+# Parsing
+from .parser import OptionParser
+
+
+__all__ = [
+ # Core classes
+ 'Context', 'BaseCommand', 'Command', 'MultiCommand', 'Group',
+ 'CommandCollection', 'Parameter', 'Option', 'Argument',
+
+ # Globals
+ 'get_current_context',
+
+ # Decorators
+ 'pass_context', 'pass_obj', 'make_pass_decorator', 'command', 'group',
+ 'argument', 'option', 'confirmation_option', 'password_option',
+ 'version_option', 'help_option',
+
+ # Types
+ 'ParamType', 'File', 'Path', 'Choice', 'IntRange', 'Tuple', 'STRING',
+ 'INT', 'FLOAT', 'BOOL', 'UUID', 'UNPROCESSED',
+
+ # Utilities
+ 'echo', 'get_binary_stream', 'get_text_stream', 'open_file',
+ 'format_filename', 'get_app_dir', 'get_os_args',
+
+ # Terminal functions
+ 'prompt', 'confirm', 'get_terminal_size', 'echo_via_pager',
+ 'progressbar', 'clear', 'style', 'unstyle', 'secho', 'edit', 'launch',
+ 'getchar', 'pause',
+
+ # Exceptions
+ 'ClickException', 'UsageError', 'BadParameter', 'FileError',
+ 'Abort', 'NoSuchOption', 'BadOptionUsage', 'BadArgumentUsage',
+ 'MissingParameter',
+
+ # Formatting
+ 'HelpFormatter', 'wrap_text',
+
+ # Parsing
+ 'OptionParser',
+]
+
+
+# Controls if click should emit the warning about the use of unicode
+# literals.
+disable_unicode_literals_warning = False
+
+
+__version__ = '6.7'
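+
+
+# Editor's note: a minimal, hedged usage sketch; it is not shipped with click.
+# It illustrates the decorator API re-exported above. The guard means it only
+# runs when this file is executed directly, so normal imports are unaffected;
+# the command name, option and greeting text are made up for the example.
+if __name__ == '__main__':
+    @command()
+    @option('--count', default=1, help='Number of greetings.')
+    @argument('name')
+    def hello(count, name):
+        """Greet NAME a given number of times."""
+        for _ in range(count):
+            echo('Hello, %s!' % name)
+
+    hello()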
diff --git a/appWSM/lib/python2.7/site-packages/click/_bashcomplete.py b/appWSM/lib/python2.7/site-packages/click/_bashcomplete.py
new file mode 100644
index 0000000000000000000000000000000000000000..d9d26d28b053de585428e573adaf4ac1bc842aa9
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/click/_bashcomplete.py
@@ -0,0 +1,83 @@
+import os
+import re
+from .utils import echo
+from .parser import split_arg_string
+from .core import MultiCommand, Option
+
+
+COMPLETION_SCRIPT = '''
+%(complete_func)s() {
+ COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \\
+ COMP_CWORD=$COMP_CWORD \\
+ %(autocomplete_var)s=complete $1 ) )
+ return 0
+}
+
+complete -F %(complete_func)s -o default %(script_names)s
+'''
+
+_invalid_ident_char_re = re.compile(r'[^a-zA-Z0-9_]')
+
+
+def get_completion_script(prog_name, complete_var):
+ cf_name = _invalid_ident_char_re.sub('', prog_name.replace('-', '_'))
+ return (COMPLETION_SCRIPT % {
+ 'complete_func': '_%s_completion' % cf_name,
+ 'script_names': prog_name,
+ 'autocomplete_var': complete_var,
+ }).strip() + ';'
+
+
+def resolve_ctx(cli, prog_name, args):
+ ctx = cli.make_context(prog_name, args, resilient_parsing=True)
+ while ctx.protected_args + ctx.args and isinstance(ctx.command, MultiCommand):
+ a = ctx.protected_args + ctx.args
+ cmd = ctx.command.get_command(ctx, a[0])
+ if cmd is None:
+ return None
+ ctx = cmd.make_context(a[0], a[1:], parent=ctx, resilient_parsing=True)
+ return ctx
+
+
+def get_choices(cli, prog_name, args, incomplete):
+ ctx = resolve_ctx(cli, prog_name, args)
+ if ctx is None:
+ return
+
+ choices = []
+ if incomplete and not incomplete[:1].isalnum():
+ for param in ctx.command.params:
+ if not isinstance(param, Option):
+ continue
+ choices.extend(param.opts)
+ choices.extend(param.secondary_opts)
+ elif isinstance(ctx.command, MultiCommand):
+ choices.extend(ctx.command.list_commands(ctx))
+
+ for item in choices:
+ if item.startswith(incomplete):
+ yield item
+
+
+def do_complete(cli, prog_name):
+ cwords = split_arg_string(os.environ['COMP_WORDS'])
+ cword = int(os.environ['COMP_CWORD'])
+ args = cwords[1:cword]
+ try:
+ incomplete = cwords[cword]
+ except IndexError:
+ incomplete = ''
+
+ for item in get_choices(cli, prog_name, args, incomplete):
+ echo(item)
+
+ return True
+
+
+def bashcomplete(cli, prog_name, complete_var, complete_instr):
+ if complete_instr == 'source':
+ echo(get_completion_script(prog_name, complete_var))
+ return True
+ elif complete_instr == 'complete':
+ return do_complete(cli, prog_name)
+ return False
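+
+
+# Editor's note: a hedged illustration, not part of click itself. It prints the
+# bash snippet that get_completion_script() above generates for a hypothetical
+# program called "mycli"; a user would typically eval that output, roughly:
+#   eval "$(_MYCLI_COMPLETE=source mycli)"
+if __name__ == '__main__':
+    print(get_completion_script('mycli', '_MYCLI_COMPLETE'))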
diff --git a/appWSM/lib/python2.7/site-packages/click/_compat.py b/appWSM/lib/python2.7/site-packages/click/_compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b43412c4d60260390722afc09e0bf5a6bf8a225
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/click/_compat.py
@@ -0,0 +1,648 @@
+import re
+import io
+import os
+import sys
+import codecs
+from weakref import WeakKeyDictionary
+
+
+PY2 = sys.version_info[0] == 2
+WIN = sys.platform.startswith('win')
+DEFAULT_COLUMNS = 80
+
+
+_ansi_re = re.compile('\033\[((?:\d|;)*)([a-zA-Z])')
+
+
+def get_filesystem_encoding():
+ return sys.getfilesystemencoding() or sys.getdefaultencoding()
+
+
+def _make_text_stream(stream, encoding, errors):
+ if encoding is None:
+ encoding = get_best_encoding(stream)
+ if errors is None:
+ errors = 'replace'
+ return _NonClosingTextIOWrapper(stream, encoding, errors,
+ line_buffering=True)
+
+
+def is_ascii_encoding(encoding):
+ """Checks if a given encoding is ascii."""
+ try:
+ return codecs.lookup(encoding).name == 'ascii'
+ except LookupError:
+ return False
+
+
+def get_best_encoding(stream):
+ """Returns the default stream encoding if not found."""
+ rv = getattr(stream, 'encoding', None) or sys.getdefaultencoding()
+ if is_ascii_encoding(rv):
+ return 'utf-8'
+ return rv
+
+
+class _NonClosingTextIOWrapper(io.TextIOWrapper):
+
+ def __init__(self, stream, encoding, errors, **extra):
+ self._stream = stream = _FixupStream(stream)
+ io.TextIOWrapper.__init__(self, stream, encoding, errors, **extra)
+
+ # The io module is a place where the Python 3 text behavior
+ # was forced upon Python 2, so we need to unbreak
+ # it to look like Python 2.
+ if PY2:
+ def write(self, x):
+ if isinstance(x, str) or is_bytes(x):
+ try:
+ self.flush()
+ except Exception:
+ pass
+ return self.buffer.write(str(x))
+ return io.TextIOWrapper.write(self, x)
+
+ def writelines(self, lines):
+ for line in lines:
+ self.write(line)
+
+ def __del__(self):
+ try:
+ self.detach()
+ except Exception:
+ pass
+
+ def isatty(self):
+ # https://bitbucket.org/pypy/pypy/issue/1803
+ return self._stream.isatty()
+
+
+class _FixupStream(object):
+ """The new io interface needs more from streams than streams
+ traditionally implement. As such, this fix-up code is necessary in
+ some circumstances.
+ """
+
+ def __init__(self, stream):
+ self._stream = stream
+
+ def __getattr__(self, name):
+ return getattr(self._stream, name)
+
+ def read1(self, size):
+ f = getattr(self._stream, 'read1', None)
+ if f is not None:
+ return f(size)
+ # We only dispatch to readline instead of read in Python 2 as we
+ # do not want to cause problems with the different implementation
+ # of line buffering.
+ if PY2:
+ return self._stream.readline(size)
+ return self._stream.read(size)
+
+ def readable(self):
+ x = getattr(self._stream, 'readable', None)
+ if x is not None:
+ return x()
+ try:
+ self._stream.read(0)
+ except Exception:
+ return False
+ return True
+
+ def writable(self):
+ x = getattr(self._stream, 'writable', None)
+ if x is not None:
+ return x()
+ try:
+ self._stream.write('')
+ except Exception:
+ try:
+ self._stream.write(b'')
+ except Exception:
+ return False
+ return True
+
+ def seekable(self):
+ x = getattr(self._stream, 'seekable', None)
+ if x is not None:
+ return x()
+ try:
+ self._stream.seek(self._stream.tell())
+ except Exception:
+ return False
+ return True
+
+
+if PY2:
+ text_type = unicode
+ bytes = str
+ raw_input = raw_input
+ string_types = (str, unicode)
+ iteritems = lambda x: x.iteritems()
+ range_type = xrange
+
+ def is_bytes(x):
+ return isinstance(x, (buffer, bytearray))
+
+ _identifier_re = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')
+
+ # For Windows, we need to force stdout/stdin/stderr to binary if it's
+ # fetched for that. This obviously is not the most correct way to do
+ # it as it changes global state. Unfortunately, there does not seem to
+ # be a clear better way to do it as just reopening the file in binary
+ # mode does not change anything.
+ #
+ # An option would be to do what Python 3 does and to open the file as
+ # binary only, patch it back to the system, and then use a wrapper
+ # stream that converts newlines. It's not quite clear what's the
+ # correct option here.
+ #
+ # This code also lives in _winconsole for the fallback to the console
+ # emulation stream.
+ #
+ # There are also Windows environments where the `msvcrt` module is not
+ # available (which is why we use try-catch instead of the WIN variable
+ # here), such as the Google App Engine development server on Windows. In
+ # those cases there is just nothing we can do.
+ try:
+ import msvcrt
+ except ImportError:
+ set_binary_mode = lambda x: x
+ else:
+ def set_binary_mode(f):
+ try:
+ fileno = f.fileno()
+ except Exception:
+ pass
+ else:
+ msvcrt.setmode(fileno, os.O_BINARY)
+ return f
+
+ def isidentifier(x):
+ return _identifier_re.search(x) is not None
+
+ def get_binary_stdin():
+ return set_binary_mode(sys.stdin)
+
+ def get_binary_stdout():
+ return set_binary_mode(sys.stdout)
+
+ def get_binary_stderr():
+ return set_binary_mode(sys.stderr)
+
+ def get_text_stdin(encoding=None, errors=None):
+ rv = _get_windows_console_stream(sys.stdin, encoding, errors)
+ if rv is not None:
+ return rv
+ return _make_text_stream(sys.stdin, encoding, errors)
+
+ def get_text_stdout(encoding=None, errors=None):
+ rv = _get_windows_console_stream(sys.stdout, encoding, errors)
+ if rv is not None:
+ return rv
+ return _make_text_stream(sys.stdout, encoding, errors)
+
+ def get_text_stderr(encoding=None, errors=None):
+ rv = _get_windows_console_stream(sys.stderr, encoding, errors)
+ if rv is not None:
+ return rv
+ return _make_text_stream(sys.stderr, encoding, errors)
+
+ def filename_to_ui(value):
+ if isinstance(value, bytes):
+ value = value.decode(get_filesystem_encoding(), 'replace')
+ return value
+else:
+ import io
+ text_type = str
+ raw_input = input
+ string_types = (str,)
+ range_type = range
+ isidentifier = lambda x: x.isidentifier()
+ iteritems = lambda x: iter(x.items())
+
+ def is_bytes(x):
+ return isinstance(x, (bytes, memoryview, bytearray))
+
+ def _is_binary_reader(stream, default=False):
+ try:
+ return isinstance(stream.read(0), bytes)
+ except Exception:
+ return default
+ # This happens in some cases where the stream was already
+ # closed. In this case, we assume the default.
+
+ def _is_binary_writer(stream, default=False):
+ try:
+ stream.write(b'')
+ except Exception:
+ try:
+ stream.write('')
+ return False
+ except Exception:
+ pass
+ return default
+ return True
+
+ def _find_binary_reader(stream):
+ # We need to figure out if the given stream is already binary.
+ # This can happen because the official docs recommend detaching
+ # the streams to get binary streams. Some code might do this, so
+ # we need to deal with this case explicitly.
+ if _is_binary_reader(stream, False):
+ return stream
+
+ buf = getattr(stream, 'buffer', None)
+
+ # Same situation here; this time we assume that the buffer is
+ # actually binary in case it's closed.
+ if buf is not None and _is_binary_reader(buf, True):
+ return buf
+
+ def _find_binary_writer(stream):
+ # We need to figure out if the given stream is already binary.
+ # This can happen because the official docs recommend detaching
+ # the streams to get binary streams. Some code might do this, so
+ # we need to deal with this case explicitly.
+ if _is_binary_writer(stream, False):
+ return stream
+
+ buf = getattr(stream, 'buffer', None)
+
+ # Same situation here; this time we assume that the buffer is
+ # actually binary in case it's closed.
+ if buf is not None and _is_binary_writer(buf, True):
+ return buf
+
+ def _stream_is_misconfigured(stream):
+ """A stream is misconfigured if its encoding is ASCII."""
+ # If the stream does not have an encoding set, we assume it's set
+ # to ASCII. This appears to happen in certain unittest
+ # environments. It's not quite clear what the correct behavior is
+ # but this at least will force Click to recover somehow.
+ return is_ascii_encoding(getattr(stream, 'encoding', None) or 'ascii')
+
+ def _is_compatible_text_stream(stream, encoding, errors):
+ stream_encoding = getattr(stream, 'encoding', None)
+ stream_errors = getattr(stream, 'errors', None)
+
+ # Perfect match.
+ if stream_encoding == encoding and stream_errors == errors:
+ return True
+
+ # Otherwise, it's only a compatible stream if we did not ask for
+ # an encoding.
+ if encoding is None:
+ return stream_encoding is not None
+
+ return False
+
+ def _force_correct_text_reader(text_reader, encoding, errors):
+ if _is_binary_reader(text_reader, False):
+ binary_reader = text_reader
+ else:
+ # If there is no target encoding set, we need to verify that the
+ # reader is not actually misconfigured.
+ if encoding is None and not _stream_is_misconfigured(text_reader):
+ return text_reader
+
+ if _is_compatible_text_stream(text_reader, encoding, errors):
+ return text_reader
+
+ # If the reader has no encoding, we try to find the underlying
+ # binary reader for it. If that fails because the environment is
+ # misconfigured, we silently go with the same reader because this
+ # is too common to happen. In that case, mojibake is better than
+ # exceptions.
+ binary_reader = _find_binary_reader(text_reader)
+ if binary_reader is None:
+ return text_reader
+
+ # At this point, we default the errors to replace instead of strict
+ # because nobody handles those errors anyways and at this point
+ # we're so fundamentally fucked that nothing can repair it.
+ if errors is None:
+ errors = 'replace'
+ return _make_text_stream(binary_reader, encoding, errors)
+
+ def _force_correct_text_writer(text_writer, encoding, errors):
+ if _is_binary_writer(text_writer, False):
+ binary_writer = text_writer
+ else:
+ # If there is no target encoding set, we need to verify that the
+ # writer is not actually misconfigured.
+ if encoding is None and not _stream_is_misconfigured(text_writer):
+ return text_writer
+
+ if _is_compatible_text_stream(text_writer, encoding, errors):
+ return text_writer
+
+ # If the writer has no encoding, we try to find the underlying
+ # binary writer for it. If that fails because the environment is
+ # misconfigured, we silently go with the same writer because this
+ # is too common to happen. In that case, mojibake is better than
+ # exceptions.
+ binary_writer = _find_binary_writer(text_writer)
+ if binary_writer is None:
+ return text_writer
+
+ # At this point, we default the errors to replace instead of strict
+ # because nobody handles those errors anyways and at this point
+ # we're so fundamentally fucked that nothing can repair it.
+ if errors is None:
+ errors = 'replace'
+ return _make_text_stream(binary_writer, encoding, errors)
+
+ def get_binary_stdin():
+ reader = _find_binary_reader(sys.stdin)
+ if reader is None:
+ raise RuntimeError('Was not able to determine binary '
+ 'stream for sys.stdin.')
+ return reader
+
+ def get_binary_stdout():
+ writer = _find_binary_writer(sys.stdout)
+ if writer is None:
+ raise RuntimeError('Was not able to determine binary '
+ 'stream for sys.stdout.')
+ return writer
+
+ def get_binary_stderr():
+ writer = _find_binary_writer(sys.stderr)
+ if writer is None:
+ raise RuntimeError('Was not able to determine binary '
+ 'stream for sys.stderr.')
+ return writer
+
+ def get_text_stdin(encoding=None, errors=None):
+ rv = _get_windows_console_stream(sys.stdin, encoding, errors)
+ if rv is not None:
+ return rv
+ return _force_correct_text_reader(sys.stdin, encoding, errors)
+
+ def get_text_stdout(encoding=None, errors=None):
+ rv = _get_windows_console_stream(sys.stdout, encoding, errors)
+ if rv is not None:
+ return rv
+ return _force_correct_text_writer(sys.stdout, encoding, errors)
+
+ def get_text_stderr(encoding=None, errors=None):
+ rv = _get_windows_console_stream(sys.stderr, encoding, errors)
+ if rv is not None:
+ return rv
+ return _force_correct_text_writer(sys.stderr, encoding, errors)
+
+ def filename_to_ui(value):
+ if isinstance(value, bytes):
+ value = value.decode(get_filesystem_encoding(), 'replace')
+ else:
+ value = value.encode('utf-8', 'surrogateescape') \
+ .decode('utf-8', 'replace')
+ return value
+
+
+def get_streerror(e, default=None):
+ if hasattr(e, 'strerror'):
+ msg = e.strerror
+ else:
+ if default is not None:
+ msg = default
+ else:
+ msg = str(e)
+ if isinstance(msg, bytes):
+ msg = msg.decode('utf-8', 'replace')
+ return msg
+
+
+def open_stream(filename, mode='r', encoding=None, errors='strict',
+ atomic=False):
+ # Standard streams first. These are simple because they don't need
+ # special handling for the atomic flag. It's entirely ignored.
+ if filename == '-':
+ if 'w' in mode:
+ if 'b' in mode:
+ return get_binary_stdout(), False
+ return get_text_stdout(encoding=encoding, errors=errors), False
+ if 'b' in mode:
+ return get_binary_stdin(), False
+ return get_text_stdin(encoding=encoding, errors=errors), False
+
+ # Non-atomic writes directly go out through the regular open functions.
+ if not atomic:
+ if encoding is None:
+ return open(filename, mode), True
+ return io.open(filename, mode, encoding=encoding, errors=errors), True
+
+ # Some usability stuff for atomic writes
+ if 'a' in mode:
+ raise ValueError(
+ 'Appending to an existing file is not supported, because that '
+ 'would involve an expensive `copy`-operation to a temporary '
+ 'file. Open the file in normal `w`-mode and copy explicitly '
+ 'if that\'s what you\'re after.'
+ )
+ if 'x' in mode:
+ raise ValueError('Use the `overwrite`-parameter instead.')
+ if 'w' not in mode:
+ raise ValueError('Atomic writes only make sense with `w`-mode.')
+
+ # Atomic writes are more complicated. They work by opening a file
+ # as a proxy in the same folder and then using the fdopen
+ # functionality to wrap it in a Python file. Then we wrap it in an
+ # atomic file that moves the file over on close.
+ import tempfile
+ fd, tmp_filename = tempfile.mkstemp(dir=os.path.dirname(filename),
+ prefix='.__atomic-write')
+
+ if encoding is not None:
+ f = io.open(fd, mode, encoding=encoding, errors=errors)
+ else:
+ f = os.fdopen(fd, mode)
+
+ return _AtomicFile(f, tmp_filename, filename), True
+
+
+# Used in a destructor call, needs extra protection from interpreter cleanup.
+if hasattr(os, 'replace'):
+ _replace = os.replace
+ _can_replace = True
+else:
+ _replace = os.rename
+ _can_replace = not WIN
+
+
+class _AtomicFile(object):
+
+ def __init__(self, f, tmp_filename, real_filename):
+ self._f = f
+ self._tmp_filename = tmp_filename
+ self._real_filename = real_filename
+ self.closed = False
+
+ @property
+ def name(self):
+ return self._real_filename
+
+ def close(self, delete=False):
+ if self.closed:
+ return
+ self._f.close()
+ if not _can_replace:
+ try:
+ os.remove(self._real_filename)
+ except OSError:
+ pass
+ _replace(self._tmp_filename, self._real_filename)
+ self.closed = True
+
+ def __getattr__(self, name):
+ return getattr(self._f, name)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, tb):
+ self.close(delete=exc_type is not None)
+
+ def __repr__(self):
+ return repr(self._f)
+
+
+auto_wrap_for_ansi = None
+colorama = None
+get_winterm_size = None
+
+
+def strip_ansi(value):
+ return _ansi_re.sub('', value)
+
+
+def should_strip_ansi(stream=None, color=None):
+ if color is None:
+ if stream is None:
+ stream = sys.stdin
+ return not isatty(stream)
+ return not color
+
+
+# If we're on Windows, we provide transparent integration through
+# colorama. This will make ANSI colors through the echo function
+# work automatically.
+if WIN:
+ # Windows has a smaller terminal
+ DEFAULT_COLUMNS = 79
+
+ from ._winconsole import _get_windows_console_stream
+
+ def _get_argv_encoding():
+ import locale
+ return locale.getpreferredencoding()
+
+ if PY2:
+ def raw_input(prompt=''):
+ sys.stderr.flush()
+ if prompt:
+ stdout = _default_text_stdout()
+ stdout.write(prompt)
+ stdin = _default_text_stdin()
+ return stdin.readline().rstrip('\r\n')
+
+ try:
+ import colorama
+ except ImportError:
+ pass
+ else:
+ _ansi_stream_wrappers = WeakKeyDictionary()
+
+ def auto_wrap_for_ansi(stream, color=None):
+ """This function wraps a stream so that calls through colorama
+ are issued to the win32 console API to recolor on demand. It
+ also ensures to reset the colors if a write call is interrupted
+ to not destroy the console afterwards.
+ """
+ try:
+ cached = _ansi_stream_wrappers.get(stream)
+ except Exception:
+ cached = None
+ if cached is not None:
+ return cached
+ strip = should_strip_ansi(stream, color)
+ ansi_wrapper = colorama.AnsiToWin32(stream, strip=strip)
+ rv = ansi_wrapper.stream
+ _write = rv.write
+
+ def _safe_write(s):
+ try:
+ return _write(s)
+ except:
+ ansi_wrapper.reset_all()
+ raise
+
+ rv.write = _safe_write
+ try:
+ _ansi_stream_wrappers[stream] = rv
+ except Exception:
+ pass
+ return rv
+
+ def get_winterm_size():
+ win = colorama.win32.GetConsoleScreenBufferInfo(
+ colorama.win32.STDOUT).srWindow
+ return win.Right - win.Left, win.Bottom - win.Top
+else:
+ def _get_argv_encoding():
+ return getattr(sys.stdin, 'encoding', None) or get_filesystem_encoding()
+
+ _get_windows_console_stream = lambda *x: None
+
+
+def term_len(x):
+ return len(strip_ansi(x))
+
+
+def isatty(stream):
+ try:
+ return stream.isatty()
+ except Exception:
+ return False
+
+
+def _make_cached_stream_func(src_func, wrapper_func):
+ cache = WeakKeyDictionary()
+ def func():
+ stream = src_func()
+ try:
+ rv = cache.get(stream)
+ except Exception:
+ rv = None
+ if rv is not None:
+ return rv
+ rv = wrapper_func()
+ try:
+ cache[stream] = rv
+ except Exception:
+ pass
+ return rv
+ return func
+
+
+_default_text_stdin = _make_cached_stream_func(
+ lambda: sys.stdin, get_text_stdin)
+_default_text_stdout = _make_cached_stream_func(
+ lambda: sys.stdout, get_text_stdout)
+_default_text_stderr = _make_cached_stream_func(
+ lambda: sys.stderr, get_text_stderr)
+
+
+binary_streams = {
+ 'stdin': get_binary_stdin,
+ 'stdout': get_binary_stdout,
+ 'stderr': get_binary_stderr,
+}
+
+text_streams = {
+ 'stdin': get_text_stdin,
+ 'stdout': get_text_stdout,
+ 'stderr': get_text_stderr,
+}
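+
+
+# Editor's note: a hedged usage sketch, not part of click. It exercises the
+# atomic branch of open_stream() above: data goes to a temporary sibling file
+# that is renamed over the target when the stream closes. The file name
+# 'demo.txt' is an arbitrary example.
+if __name__ == '__main__':
+    f, should_close = open_stream('demo.txt', 'w', atomic=True)
+    try:
+        f.write('replaced atomically on close\n')
+    finally:
+        if should_close:
+            f.close()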
diff --git a/appWSM/lib/python2.7/site-packages/click/_termui_impl.py b/appWSM/lib/python2.7/site-packages/click/_termui_impl.py
new file mode 100644
index 0000000000000000000000000000000000000000..7cfd3d5c4a058b0ced59c74469322c709005b22a
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/click/_termui_impl.py
@@ -0,0 +1,547 @@
+"""
+ click._termui_impl
+ ~~~~~~~~~~~~~~~~~~
+
+ This module contains implementations for the termui module. To keep the
+ import time of Click down, some infrequently used functionality is placed
+ in this module and only imported as needed.
+
+ :copyright: (c) 2014 by Armin Ronacher.
+ :license: BSD, see LICENSE for more details.
+"""
+import os
+import sys
+import time
+import math
+from ._compat import _default_text_stdout, range_type, PY2, isatty, \
+ open_stream, strip_ansi, term_len, get_best_encoding, WIN
+from .utils import echo
+from .exceptions import ClickException
+
+
+if os.name == 'nt':
+ BEFORE_BAR = '\r'
+ AFTER_BAR = '\n'
+else:
+ BEFORE_BAR = '\r\033[?25l'
+ AFTER_BAR = '\033[?25h\n'
+
+
+def _length_hint(obj):
+ """Returns the length hint of an object."""
+ try:
+ return len(obj)
+ except (AttributeError, TypeError):
+ try:
+ get_hint = type(obj).__length_hint__
+ except AttributeError:
+ return None
+ try:
+ hint = get_hint(obj)
+ except TypeError:
+ return None
+ if hint is NotImplemented or \
+ not isinstance(hint, (int, long)) or \
+ hint < 0:
+ return None
+ return hint
+
+
+class ProgressBar(object):
+
+ def __init__(self, iterable, length=None, fill_char='#', empty_char=' ',
+ bar_template='%(bar)s', info_sep=' ', show_eta=True,
+ show_percent=None, show_pos=False, item_show_func=None,
+ label=None, file=None, color=None, width=30):
+ self.fill_char = fill_char
+ self.empty_char = empty_char
+ self.bar_template = bar_template
+ self.info_sep = info_sep
+ self.show_eta = show_eta
+ self.show_percent = show_percent
+ self.show_pos = show_pos
+ self.item_show_func = item_show_func
+ self.label = label or ''
+ if file is None:
+ file = _default_text_stdout()
+ self.file = file
+ self.color = color
+ self.width = width
+ self.autowidth = width == 0
+
+ if length is None:
+ length = _length_hint(iterable)
+ if iterable is None:
+ if length is None:
+ raise TypeError('iterable or length is required')
+ iterable = range_type(length)
+ self.iter = iter(iterable)
+ self.length = length
+ self.length_known = length is not None
+ self.pos = 0
+ self.avg = []
+ self.start = self.last_eta = time.time()
+ self.eta_known = False
+ self.finished = False
+ self.max_width = None
+ self.entered = False
+ self.current_item = None
+ self.is_hidden = not isatty(self.file)
+ self._last_line = None
+
+ def __enter__(self):
+ self.entered = True
+ self.render_progress()
+ return self
+
+ def __exit__(self, exc_type, exc_value, tb):
+ self.render_finish()
+
+ def __iter__(self):
+ if not self.entered:
+ raise RuntimeError('You need to use progress bars in a with block.')
+ self.render_progress()
+ return self
+
+ def render_finish(self):
+ if self.is_hidden:
+ return
+ self.file.write(AFTER_BAR)
+ self.file.flush()
+
+ @property
+ def pct(self):
+ if self.finished:
+ return 1.0
+ return min(self.pos / (float(self.length) or 1), 1.0)
+
+ @property
+ def time_per_iteration(self):
+ if not self.avg:
+ return 0.0
+ return sum(self.avg) / float(len(self.avg))
+
+ @property
+ def eta(self):
+ if self.length_known and not self.finished:
+ return self.time_per_iteration * (self.length - self.pos)
+ return 0.0
+
+ def format_eta(self):
+ if self.eta_known:
+ t = self.eta + 1
+ seconds = t % 60
+ t /= 60
+ minutes = t % 60
+ t /= 60
+ hours = t % 24
+ t /= 24
+ if t > 0:
+ days = t
+ return '%dd %02d:%02d:%02d' % (days, hours, minutes, seconds)
+ else:
+ return '%02d:%02d:%02d' % (hours, minutes, seconds)
+ return ''
+
+ def format_pos(self):
+ pos = str(self.pos)
+ if self.length_known:
+ pos += '/%s' % self.length
+ return pos
+
+ def format_pct(self):
+ return ('% 4d%%' % int(self.pct * 100))[1:]
+
+ def format_progress_line(self):
+ show_percent = self.show_percent
+
+ info_bits = []
+ if self.length_known:
+ bar_length = int(self.pct * self.width)
+ bar = self.fill_char * bar_length
+ bar += self.empty_char * (self.width - bar_length)
+ if show_percent is None:
+ show_percent = not self.show_pos
+ else:
+ if self.finished:
+ bar = self.fill_char * self.width
+ else:
+ bar = list(self.empty_char * (self.width or 1))
+ if self.time_per_iteration != 0:
+ bar[int((math.cos(self.pos * self.time_per_iteration)
+ / 2.0 + 0.5) * self.width)] = self.fill_char
+ bar = ''.join(bar)
+
+ if self.show_pos:
+ info_bits.append(self.format_pos())
+ if show_percent:
+ info_bits.append(self.format_pct())
+ if self.show_eta and self.eta_known and not self.finished:
+ info_bits.append(self.format_eta())
+ if self.item_show_func is not None:
+ item_info = self.item_show_func(self.current_item)
+ if item_info is not None:
+ info_bits.append(item_info)
+
+ return (self.bar_template % {
+ 'label': self.label,
+ 'bar': bar,
+ 'info': self.info_sep.join(info_bits)
+ }).rstrip()
+
+ def render_progress(self):
+ from .termui import get_terminal_size
+ nl = False
+
+ if self.is_hidden:
+ buf = [self.label]
+ nl = True
+ else:
+ buf = []
+ # Update width in case the terminal has been resized
+ if self.autowidth:
+ old_width = self.width
+ self.width = 0
+ clutter_length = term_len(self.format_progress_line())
+ new_width = max(0, get_terminal_size()[0] - clutter_length)
+ if new_width < old_width:
+ buf.append(BEFORE_BAR)
+ buf.append(' ' * self.max_width)
+ self.max_width = new_width
+ self.width = new_width
+
+ clear_width = self.width
+ if self.max_width is not None:
+ clear_width = self.max_width
+
+ buf.append(BEFORE_BAR)
+ line = self.format_progress_line()
+ line_len = term_len(line)
+ if self.max_width is None or self.max_width < line_len:
+ self.max_width = line_len
+ buf.append(line)
+
+ buf.append(' ' * (clear_width - line_len))
+ line = ''.join(buf)
+
+ # Render the line only if it changed.
+ if line != self._last_line:
+ self._last_line = line
+ echo(line, file=self.file, color=self.color, nl=nl)
+ self.file.flush()
+
+ def make_step(self, n_steps):
+ self.pos += n_steps
+ if self.length_known and self.pos >= self.length:
+ self.finished = True
+
+ if (time.time() - self.last_eta) < 1.0:
+ return
+
+ self.last_eta = time.time()
+ self.avg = self.avg[-6:] + [-(self.start - time.time()) / (self.pos)]
+
+ self.eta_known = self.length_known
+
+ def update(self, n_steps):
+ self.make_step(n_steps)
+ self.render_progress()
+
+ def finish(self):
+ self.eta_known = 0
+ self.current_item = None
+ self.finished = True
+
+ def next(self):
+ if self.is_hidden:
+ return next(self.iter)
+ try:
+ rv = next(self.iter)
+ self.current_item = rv
+ except StopIteration:
+ self.finish()
+ self.render_progress()
+ raise StopIteration()
+ else:
+ self.update(1)
+ return rv
+
+ if not PY2:
+ __next__ = next
+ del next
+
+
+def pager(text, color=None):
+ """Decide what method to use for paging through text."""
+ stdout = _default_text_stdout()
+ if not isatty(sys.stdin) or not isatty(stdout):
+ return _nullpager(stdout, text, color)
+ pager_cmd = (os.environ.get('PAGER', None) or '').strip()
+ if pager_cmd:
+ if WIN:
+ return _tempfilepager(text, pager_cmd, color)
+ return _pipepager(text, pager_cmd, color)
+ if os.environ.get('TERM') in ('dumb', 'emacs'):
+ return _nullpager(stdout, text, color)
+ if WIN or sys.platform.startswith('os2'):
+ return _tempfilepager(text, 'more <', color)
+ if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
+ return _pipepager(text, 'less', color)
+
+ import tempfile
+ fd, filename = tempfile.mkstemp()
+ os.close(fd)
+ try:
+ if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
+ return _pipepager(text, 'more', color)
+ return _nullpager(stdout, text, color)
+ finally:
+ os.unlink(filename)
+
+
+def _pipepager(text, cmd, color):
+ """Page through text by feeding it to another program. Invoking a
+ pager through this might support colors.
+ """
+ import subprocess
+ env = dict(os.environ)
+
+ # If we're piping to less we might support colors under the
+ # condition that the -r/-R flag is set; when no flags are set at all
+ # we export LESS=-R ourselves below.
+ cmd_detail = cmd.rsplit('/', 1)[-1].split()
+ if color is None and cmd_detail[0] == 'less':
+ less_flags = os.environ.get('LESS', '') + ' '.join(cmd_detail[1:])
+ if not less_flags:
+ env['LESS'] = '-R'
+ color = True
+ elif 'r' in less_flags or 'R' in less_flags:
+ color = True
+
+ if not color:
+ text = strip_ansi(text)
+
+ c = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
+ env=env)
+ encoding = get_best_encoding(c.stdin)
+ try:
+ c.stdin.write(text.encode(encoding, 'replace'))
+ c.stdin.close()
+ except (IOError, KeyboardInterrupt):
+ pass
+
+ # Less doesn't respect ^C, but catches it for its own UI purposes (aborting
+ # search or other commands inside less).
+ #
+ # That means when the user hits ^C, the parent process (click) terminates,
+ # but less is still alive, paging the output and messing up the terminal.
+ #
+ # If the user wants to make the pager exit on ^C, they should set
+ # `LESS='-K'`. It's not our decision to make.
+ while True:
+ try:
+ c.wait()
+ except KeyboardInterrupt:
+ pass
+ else:
+ break
+
+
+def _tempfilepager(text, cmd, color):
+ """Page through text by invoking a program on a temporary file."""
+ import tempfile
+ filename = tempfile.mktemp()
+ if not color:
+ text = strip_ansi(text)
+ encoding = get_best_encoding(sys.stdout)
+ with open_stream(filename, 'wb')[0] as f:
+ f.write(text.encode(encoding))
+ try:
+ os.system(cmd + ' "' + filename + '"')
+ finally:
+ os.unlink(filename)
+
+
+def _nullpager(stream, text, color):
+ """Simply print unformatted text. This is the ultimate fallback."""
+ if not color:
+ text = strip_ansi(text)
+ stream.write(text)
+
+
+class Editor(object):
+
+ def __init__(self, editor=None, env=None, require_save=True,
+ extension='.txt'):
+ self.editor = editor
+ self.env = env
+ self.require_save = require_save
+ self.extension = extension
+
+ def get_editor(self):
+ if self.editor is not None:
+ return self.editor
+ for key in 'VISUAL', 'EDITOR':
+ rv = os.environ.get(key)
+ if rv:
+ return rv
+ if WIN:
+ return 'notepad'
+ for editor in 'vim', 'nano':
+ if os.system('which %s >/dev/null 2>&1' % editor) == 0:
+ return editor
+ return 'vi'
+
+ def edit_file(self, filename):
+ import subprocess
+ editor = self.get_editor()
+ if self.env:
+ environ = os.environ.copy()
+ environ.update(self.env)
+ else:
+ environ = None
+ try:
+ c = subprocess.Popen('%s "%s"' % (editor, filename),
+ env=environ, shell=True)
+ exit_code = c.wait()
+ if exit_code != 0:
+ raise ClickException('%s: Editing failed!' % editor)
+ except OSError as e:
+ raise ClickException('%s: Editing failed: %s' % (editor, e))
+
+ def edit(self, text):
+ import tempfile
+
+ text = text or ''
+ if text and not text.endswith('\n'):
+ text += '\n'
+
+ fd, name = tempfile.mkstemp(prefix='editor-', suffix=self.extension)
+ try:
+ if WIN:
+ encoding = 'utf-8-sig'
+ text = text.replace('\n', '\r\n')
+ else:
+ encoding = 'utf-8'
+ text = text.encode(encoding)
+
+ f = os.fdopen(fd, 'wb')
+ f.write(text)
+ f.close()
+ timestamp = os.path.getmtime(name)
+
+ self.edit_file(name)
+
+ if self.require_save \
+ and os.path.getmtime(name) == timestamp:
+ return None
+
+ f = open(name, 'rb')
+ try:
+ rv = f.read()
+ finally:
+ f.close()
+ return rv.decode('utf-8-sig').replace('\r\n', '\n')
+ finally:
+ os.unlink(name)
+
+
+def open_url(url, wait=False, locate=False):
+ import subprocess
+
+ def _unquote_file(url):
+ try:
+ import urllib
+ except ImportError:
+ import urllib
+ if url.startswith('file://'):
+ url = urllib.unquote(url[7:])
+ return url
+
+ if sys.platform == 'darwin':
+ args = ['open']
+ if wait:
+ args.append('-W')
+ if locate:
+ args.append('-R')
+ args.append(_unquote_file(url))
+ null = open('/dev/null', 'w')
+ try:
+ return subprocess.Popen(args, stderr=null).wait()
+ finally:
+ null.close()
+ elif WIN:
+ if locate:
+ url = _unquote_file(url)
+ args = 'explorer /select,"%s"' % _unquote_file(
+ url.replace('"', ''))
+ else:
+ args = 'start %s "" "%s"' % (
+ wait and '/WAIT' or '', url.replace('"', ''))
+ return os.system(args)
+
+ try:
+ if locate:
+ url = os.path.dirname(_unquote_file(url)) or '.'
+ else:
+ url = _unquote_file(url)
+ c = subprocess.Popen(['xdg-open', url])
+ if wait:
+ return c.wait()
+ return 0
+ except OSError:
+ if url.startswith(('http://', 'https://')) and not locate and not wait:
+ import webbrowser
+ webbrowser.open(url)
+ return 0
+ return 1
+
+
+def _translate_ch_to_exc(ch):
+ if ch == '\x03':
+ raise KeyboardInterrupt()
+ if ch == '\x04':
+ raise EOFError()
+
+
+if WIN:
+ import msvcrt
+
+ def getchar(echo):
+ rv = msvcrt.getch()
+ if echo:
+ msvcrt.putchar(rv)
+ _translate_ch_to_exc(rv)
+ if PY2:
+ enc = getattr(sys.stdin, 'encoding', None)
+ if enc is not None:
+ rv = rv.decode(enc, 'replace')
+ else:
+ rv = rv.decode('cp1252', 'replace')
+ return rv
+else:
+ import tty
+ import termios
+
+ def getchar(echo):
+ if not isatty(sys.stdin):
+ f = open('/dev/tty')
+ fd = f.fileno()
+ else:
+ fd = sys.stdin.fileno()
+ f = None
+ try:
+ old_settings = termios.tcgetattr(fd)
+ try:
+ tty.setraw(fd)
+ ch = os.read(fd, 32)
+ if echo and isatty(sys.stdout):
+ sys.stdout.write(ch)
+ finally:
+ termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
+ sys.stdout.flush()
+ if f is not None:
+ f.close()
+ except termios.error:
+ pass
+ _translate_ch_to_exc(ch)
+ return ch.decode(get_best_encoding(sys.stdin), 'replace')
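+
+
+# Editor's note: a hedged demo, not part of click. It drives the ProgressBar
+# class above directly; in application code the click.progressbar() wrapper in
+# click.termui is the intended entry point.
+if __name__ == '__main__':
+    with ProgressBar(range_type(50), length=50, label='demo',
+                     bar_template='%(label)s [%(bar)s] %(info)s') as bar:
+        for _ in bar:
+            time.sleep(0.02)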
diff --git a/appWSM/lib/python2.7/site-packages/click/_textwrap.py b/appWSM/lib/python2.7/site-packages/click/_textwrap.py
new file mode 100644
index 0000000000000000000000000000000000000000..7e776031eaa92904fc77e934a1125346bc9fed6a
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/click/_textwrap.py
@@ -0,0 +1,38 @@
+import textwrap
+from contextlib import contextmanager
+
+
+class TextWrapper(textwrap.TextWrapper):
+
+ def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
+ space_left = max(width - cur_len, 1)
+
+ if self.break_long_words:
+ last = reversed_chunks[-1]
+ cut = last[:space_left]
+ res = last[space_left:]
+ cur_line.append(cut)
+ reversed_chunks[-1] = res
+ elif not cur_line:
+ cur_line.append(reversed_chunks.pop())
+
+ @contextmanager
+ def extra_indent(self, indent):
+ old_initial_indent = self.initial_indent
+ old_subsequent_indent = self.subsequent_indent
+ self.initial_indent += indent
+ self.subsequent_indent += indent
+ try:
+ yield
+ finally:
+ self.initial_indent = old_initial_indent
+ self.subsequent_indent = old_subsequent_indent
+
+ def indent_only(self, text):
+ rv = []
+ for idx, line in enumerate(text.splitlines()):
+ indent = self.initial_indent
+ if idx > 0:
+ indent = self.subsequent_indent
+ rv.append(indent + line)
+ return '\n'.join(rv)
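+
+
+# Editor's note: a hedged usage sketch, not part of click. It shows the
+# extra_indent() context manager above temporarily widening the indentation
+# used by this TextWrapper subclass; the sample text is made up.
+if __name__ == '__main__':
+    wrapper = TextWrapper(width=40, initial_indent='  ', subsequent_indent='  ')
+    with wrapper.extra_indent('    '):
+        print(wrapper.fill('a long help text that is wrapped and indented by '
+                           'six spaces while inside the with block'))
+    print(wrapper.fill('back to the original two-space indent'))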
diff --git a/appWSM/lib/python2.7/site-packages/click/_unicodefun.py b/appWSM/lib/python2.7/site-packages/click/_unicodefun.py
new file mode 100644
index 0000000000000000000000000000000000000000..9e17a384eff0361d4932e84d1e4d8cb42bb5a11e
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/click/_unicodefun.py
@@ -0,0 +1,118 @@
+import os
+import sys
+import codecs
+
+from ._compat import PY2
+
+
+# If someone wants to vendor click, we want to ensure the
+# correct package is discovered. Ideally we could use a
+# relative import here but unfortunately Python does not
+# support that.
+click = sys.modules[__name__.rsplit('.', 1)[0]]
+
+
+def _find_unicode_literals_frame():
+ import __future__
+ frm = sys._getframe(1)
+ idx = 1
+ while frm is not None:
+ if frm.f_globals.get('__name__', '').startswith('click.'):
+ frm = frm.f_back
+ idx += 1
+ elif frm.f_code.co_flags & __future__.unicode_literals.compiler_flag:
+ return idx
+ else:
+ break
+ return 0
+
+
+def _check_for_unicode_literals():
+ if not __debug__:
+ return
+ if not PY2 or click.disable_unicode_literals_warning:
+ return
+ bad_frame = _find_unicode_literals_frame()
+ if bad_frame <= 0:
+ return
+ from warnings import warn
+ warn(Warning('Click detected the use of the unicode_literals '
+ '__future__ import. This is heavily discouraged '
+ 'because it can introduce subtle bugs in your '
+ 'code. You should instead use explicit u"" literals '
+ 'for your unicode strings. For more information see '
+ 'http://click.pocoo.org/python3/'),
+ stacklevel=bad_frame)
+
+
+def _verify_python3_env():
+ """Ensures that the environment is good for unicode on Python 3."""
+ if PY2:
+ return
+ try:
+ import locale
+ fs_enc = codecs.lookup(locale.getpreferredencoding()).name
+ except Exception:
+ fs_enc = 'ascii'
+ if fs_enc != 'ascii':
+ return
+
+ extra = ''
+ if os.name == 'posix':
+ import subprocess
+ rv = subprocess.Popen(['locale', '-a'], stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE).communicate()[0]
+ good_locales = set()
+ has_c_utf8 = False
+
+ # Make sure we're operating on text here.
+ if isinstance(rv, bytes):
+ rv = rv.decode('ascii', 'replace')
+
+ for line in rv.splitlines():
+ locale = line.strip()
+ if locale.lower().endswith(('.utf-8', '.utf8')):
+ good_locales.add(locale)
+ if locale.lower() in ('c.utf8', 'c.utf-8'):
+ has_c_utf8 = True
+
+ extra += '\n\n'
+ if not good_locales:
+ extra += (
+ 'Additional information: on this system no suitable UTF-8\n'
+ 'locales were discovered. This most likely requires resolving\n'
+ 'by reconfiguring the locale system.'
+ )
+ elif has_c_utf8:
+ extra += (
+ 'This system supports the C.UTF-8 locale which is recommended.\n'
+ 'You might be able to resolve your issue by exporting the\n'
+ 'following environment variables:\n\n'
+ ' export LC_ALL=C.UTF-8\n'
+ ' export LANG=C.UTF-8'
+ )
+ else:
+ extra += (
+ 'This system lists a couple of UTF-8 supporting locales that\n'
+ 'you can pick from. The following suitable locales were\n'
+ 'discovered: %s'
+ ) % ', '.join(sorted(good_locales))
+
+ bad_locale = None
+ for locale in os.environ.get('LC_ALL'), os.environ.get('LANG'):
+ if locale and locale.lower().endswith(('.utf-8', '.utf8')):
+ bad_locale = locale
+ if locale is not None:
+ break
+ if bad_locale is not None:
+ extra += (
+ '\n\nClick discovered that you exported a UTF-8 locale\n'
+ 'but the locale system could not pick up from it because\n'
+ 'it does not exist. The exported locale is "%s" but it\n'
+ 'is not supported'
+ ) % bad_locale
+
+ raise RuntimeError('Click will abort further execution because Python 3 '
+ 'was configured to use ASCII as encoding for the '
+ 'environment. Consult http://click.pocoo.org/python3/'
+ 'for mitigation steps.' + extra)
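+
+
+# Editor's note: a hedged sketch, not part of click. _verify_python3_env()
+# above raises RuntimeError on Python 3 when the locale resolves to ASCII;
+# this shows how a caller might surface that message without aborting.
+if __name__ == '__main__':
+    try:
+        _verify_python3_env()
+        print('locale looks fine for unicode')
+    except RuntimeError as exc:
+        print(exc)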
diff --git a/appWSM/lib/python2.7/site-packages/click/_winconsole.py b/appWSM/lib/python2.7/site-packages/click/_winconsole.py
new file mode 100644
index 0000000000000000000000000000000000000000..9aed942162bbdd8683ed28a6de30727628d0054c
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/click/_winconsole.py
@@ -0,0 +1,273 @@
+# -*- coding: utf-8 -*-
+# This module is based on the excellent work by Adam Bartoš who
+# provided a lot of what went into the implementation here in
+# the discussion to issue1602 in the Python bug tracker.
+#
+# There are some general differences in regards to how this works
+# compared to the original patches as we do not need to patch
+# the entire interpreter but just work in our little world of
+# echo and prompt.
+
+import io
+import os
+import sys
+import zlib
+import time
+import ctypes
+import msvcrt
+from click._compat import _NonClosingTextIOWrapper, text_type, PY2
+from ctypes import byref, POINTER, c_int, c_char, c_char_p, \
+ c_void_p, py_object, c_ssize_t, c_ulong, windll, WINFUNCTYPE
+try:
+ from ctypes import pythonapi
+ PyObject_GetBuffer = pythonapi.PyObject_GetBuffer
+ PyBuffer_Release = pythonapi.PyBuffer_Release
+except ImportError:
+ pythonapi = None
+from ctypes.wintypes import LPWSTR, LPCWSTR
+
+
+c_ssize_p = POINTER(c_ssize_t)
+
+kernel32 = windll.kernel32
+GetStdHandle = kernel32.GetStdHandle
+ReadConsoleW = kernel32.ReadConsoleW
+WriteConsoleW = kernel32.WriteConsoleW
+GetLastError = kernel32.GetLastError
+GetCommandLineW = WINFUNCTYPE(LPWSTR)(
+ ('GetCommandLineW', windll.kernel32))
+CommandLineToArgvW = WINFUNCTYPE(
+ POINTER(LPWSTR), LPCWSTR, POINTER(c_int))(
+ ('CommandLineToArgvW', windll.shell32))
+
+
+STDIN_HANDLE = GetStdHandle(-10)
+STDOUT_HANDLE = GetStdHandle(-11)
+STDERR_HANDLE = GetStdHandle(-12)
+
+
+PyBUF_SIMPLE = 0
+PyBUF_WRITABLE = 1
+
+ERROR_SUCCESS = 0
+ERROR_NOT_ENOUGH_MEMORY = 8
+ERROR_OPERATION_ABORTED = 995
+
+STDIN_FILENO = 0
+STDOUT_FILENO = 1
+STDERR_FILENO = 2
+
+EOF = b'\x1a'
+MAX_BYTES_WRITTEN = 32767
+
+
+class Py_buffer(ctypes.Structure):
+ _fields_ = [
+ ('buf', c_void_p),
+ ('obj', py_object),
+ ('len', c_ssize_t),
+ ('itemsize', c_ssize_t),
+ ('readonly', c_int),
+ ('ndim', c_int),
+ ('format', c_char_p),
+ ('shape', c_ssize_p),
+ ('strides', c_ssize_p),
+ ('suboffsets', c_ssize_p),
+ ('internal', c_void_p)
+ ]
+
+ if PY2:
+ _fields_.insert(-1, ('smalltable', c_ssize_t * 2))
+
+
+# On PyPy we cannot get buffers so our ability to operate here is
+# severely limited.
+if pythonapi is None:
+ get_buffer = None
+else:
+ def get_buffer(obj, writable=False):
+ buf = Py_buffer()
+ flags = PyBUF_WRITABLE if writable else PyBUF_SIMPLE
+ PyObject_GetBuffer(py_object(obj), byref(buf), flags)
+ try:
+ buffer_type = c_char * buf.len
+ return buffer_type.from_address(buf.buf)
+ finally:
+ PyBuffer_Release(byref(buf))
+
+
+class _WindowsConsoleRawIOBase(io.RawIOBase):
+
+ def __init__(self, handle):
+ self.handle = handle
+
+ def isatty(self):
+ io.RawIOBase.isatty(self)
+ return True
+
+
+class _WindowsConsoleReader(_WindowsConsoleRawIOBase):
+
+ def readable(self):
+ return True
+
+ def readinto(self, b):
+ bytes_to_be_read = len(b)
+ if not bytes_to_be_read:
+ return 0
+ elif bytes_to_be_read % 2:
+ raise ValueError('cannot read odd number of bytes from '
+ 'UTF-16-LE encoded console')
+
+ buffer = get_buffer(b, writable=True)
+ code_units_to_be_read = bytes_to_be_read // 2
+ code_units_read = c_ulong()
+
+ rv = ReadConsoleW(self.handle, buffer, code_units_to_be_read,
+ byref(code_units_read), None)
+ if GetLastError() == ERROR_OPERATION_ABORTED:
+ # wait for KeyboardInterrupt
+ time.sleep(0.1)
+ if not rv:
+ raise OSError('Windows error: %s' % GetLastError())
+
+ if buffer[0] == EOF:
+ return 0
+ return 2 * code_units_read.value
+
+
+class _WindowsConsoleWriter(_WindowsConsoleRawIOBase):
+
+ def writable(self):
+ return True
+
+ @staticmethod
+ def _get_error_message(errno):
+ if errno == ERROR_SUCCESS:
+ return 'ERROR_SUCCESS'
+ elif errno == ERROR_NOT_ENOUGH_MEMORY:
+ return 'ERROR_NOT_ENOUGH_MEMORY'
+ return 'Windows error %s' % errno
+
+ def write(self, b):
+ bytes_to_be_written = len(b)
+ buf = get_buffer(b)
+ code_units_to_be_written = min(bytes_to_be_written,
+ MAX_BYTES_WRITTEN) // 2
+ code_units_written = c_ulong()
+
+ WriteConsoleW(self.handle, buf, code_units_to_be_written,
+ byref(code_units_written), None)
+ bytes_written = 2 * code_units_written.value
+
+ if bytes_written == 0 and bytes_to_be_written > 0:
+ raise OSError(self._get_error_message(GetLastError()))
+ return bytes_written
+
+
+class ConsoleStream(object):
+
+ def __init__(self, text_stream, byte_stream):
+ self._text_stream = text_stream
+ self.buffer = byte_stream
+
+ @property
+ def name(self):
+ return self.buffer.name
+
+ def write(self, x):
+ if isinstance(x, text_type):
+ return self._text_stream.write(x)
+ try:
+ self.flush()
+ except Exception:
+ pass
+ return self.buffer.write(x)
+
+ def writelines(self, lines):
+ for line in lines:
+ self.write(line)
+
+ def __getattr__(self, name):
+ return getattr(self._text_stream, name)
+
+ def isatty(self):
+ return self.buffer.isatty()
+
+ def __repr__(self):
+ return '<ConsoleStream name=%r encoding=%r>' % (
+ self.name,
+ self.encoding,
+ )
+
+
+def _get_text_stdin(buffer_stream):
+ text_stream = _NonClosingTextIOWrapper(
+ io.BufferedReader(_WindowsConsoleReader(STDIN_HANDLE)),
+ 'utf-16-le', 'strict', line_buffering=True)
+ return ConsoleStream(text_stream, buffer_stream)
+
+
+def _get_text_stdout(buffer_stream):
+ text_stream = _NonClosingTextIOWrapper(
+ _WindowsConsoleWriter(STDOUT_HANDLE),
+ 'utf-16-le', 'strict', line_buffering=True)
+ return ConsoleStream(text_stream, buffer_stream)
+
+
+def _get_text_stderr(buffer_stream):
+ text_stream = _NonClosingTextIOWrapper(
+ _WindowsConsoleWriter(STDERR_HANDLE),
+ 'utf-16-le', 'strict', line_buffering=True)
+ return ConsoleStream(text_stream, buffer_stream)
+
+
+if PY2:
+ def _hash_py_argv():
+ return zlib.crc32('\x00'.join(sys.argv[1:]))
+
+ _initial_argv_hash = _hash_py_argv()
+
+ def _get_windows_argv():
+ argc = c_int(0)
+ argv_unicode = CommandLineToArgvW(GetCommandLineW(), byref(argc))
+ argv = [argv_unicode[i] for i in range(0, argc.value)]
+
+ if not hasattr(sys, 'frozen'):
+ argv = argv[1:]
+ while len(argv) > 0:
+ arg = argv[0]
+ if not arg.startswith('-') or arg == '-':
+ break
+ argv = argv[1:]
+ if arg.startswith(('-c', '-m')):
+ break
+
+ return argv[1:]
+
+
+_stream_factories = {
+ 0: _get_text_stdin,
+ 1: _get_text_stdout,
+ 2: _get_text_stderr,
+}
+
+
+def _get_windows_console_stream(f, encoding, errors):
+ if get_buffer is not None and \
+ encoding in ('utf-16-le', None) \
+ and errors in ('strict', None) and \
+ hasattr(f, 'isatty') and f.isatty():
+ func = _stream_factories.get(f.fileno())
+ if func is not None:
+ if not PY2:
+ f = getattr(f, 'buffer')
+ if f is None:
+ return None
+ else:
+ # If we are on Python 2 we need to set the stream that we
+ # deal with to binary mode as otherwise the exercise is a
+ # bit moot. The same problems apply as for
+ # get_binary_stdin and friends from _compat.
+ msvcrt.setmode(f.fileno(), os.O_BINARY)
+ return func(f)
diff --git a/appWSM/lib/python2.7/site-packages/click/core.py b/appWSM/lib/python2.7/site-packages/click/core.py
new file mode 100644
index 0000000000000000000000000000000000000000..74564514753131ac4f00541ba08dcafbf3d902f8
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/click/core.py
@@ -0,0 +1,1744 @@
+import errno
+import os
+import sys
+from contextlib import contextmanager
+from itertools import repeat
+from functools import update_wrapper
+
+from .types import convert_type, IntRange, BOOL
+from .utils import make_str, make_default_short_help, echo, get_os_args
+from .exceptions import ClickException, UsageError, BadParameter, Abort, \
+ MissingParameter
+from .termui import prompt, confirm
+from .formatting import HelpFormatter, join_options
+from .parser import OptionParser, split_opt
+from .globals import push_context, pop_context
+
+from ._compat import PY2, isidentifier, iteritems
+from ._unicodefun import _check_for_unicode_literals, _verify_python3_env
+
+
+_missing = object()
+
+
+SUBCOMMAND_METAVAR = 'COMMAND [ARGS]...'
+SUBCOMMANDS_METAVAR = 'COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]...'
+
+
+def _bashcomplete(cmd, prog_name, complete_var=None):
+ """Internal handler for the bash completion support."""
+ if complete_var is None:
+ complete_var = '_%s_COMPLETE' % (prog_name.replace('-', '_')).upper()
+ complete_instr = os.environ.get(complete_var)
+ if not complete_instr:
+ return
+
+ from ._bashcomplete import bashcomplete
+ if bashcomplete(cmd, prog_name, complete_var, complete_instr):
+ sys.exit(1)
+
+
+def _check_multicommand(base_command, cmd_name, cmd, register=False):
+ if not base_command.chain or not isinstance(cmd, MultiCommand):
+ return
+ if register:
+ hint = 'It is not possible to add multi commands as children to ' \
+ 'another multi command that is in chain mode'
+ else:
+ hint = 'Found a multi command as subcommand to a multi command ' \
+ 'that is in chain mode. This is not supported'
+ raise RuntimeError('%s. Command "%s" is set to chain and "%s" was '
+ 'added as subcommand but it in itself is a '
+ 'multi command. ("%s" is a %s within a chained '
+ '%s named "%s"). This restriction was supposed to '
+ 'be lifted in 6.0 but the fix was flawed. This '
+ 'will be fixed in Click 7.0' % (
+ hint, base_command.name, cmd_name,
+ cmd_name, cmd.__class__.__name__,
+ base_command.__class__.__name__,
+ base_command.name))
+
+
+def batch(iterable, batch_size):
+ return list(zip(*repeat(iter(iterable), batch_size)))
+
+
+def invoke_param_callback(callback, ctx, param, value):
+ code = getattr(callback, '__code__', None)
+ args = getattr(code, 'co_argcount', 3)
+
+ if args < 3:
+ # This will become a warning in Click 3.0:
+ from warnings import warn
+ warn(Warning('Invoked legacy parameter callback "%s". The new '
+ 'signature for such callbacks starting with '
+ 'click 2.0 is (ctx, param, value).'
+ % callback), stacklevel=3)
+ return callback(ctx, value)
+ return callback(ctx, param, value)
+
+
+@contextmanager
+def augment_usage_errors(ctx, param=None):
+ """Context manager that attaches extra information to exceptions that
+ fly.
+ """
+ try:
+ yield
+ except BadParameter as e:
+ if e.ctx is None:
+ e.ctx = ctx
+ if param is not None and e.param is None:
+ e.param = param
+ raise
+ except UsageError as e:
+ if e.ctx is None:
+ e.ctx = ctx
+ raise
+
+
+def iter_params_for_processing(invocation_order, declaration_order):
+ """Given a sequence of parameters in the order as should be considered
+ for processing and an iterable of parameters that exist, this returns
+ a list in the correct order as they should be processed.
+ """
+ def sort_key(item):
+ try:
+ idx = invocation_order.index(item)
+ except ValueError:
+ idx = float('inf')
+ return (not item.is_eager, idx)
+
+ return sorted(declaration_order, key=sort_key)
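+# Illustration of the ordering above (the parameter names are hypothetical):
+# with declared parameters [name, verbose, help], where only ``--verbose``
+# and ``--help`` appeared on the command line in that order and ``help`` is
+# eager, the processing order becomes [help, verbose, name]. Eager
+# parameters sort first, then invocation order; parameters that were never
+# invoked sort last (index ``float('inf')``).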
+
+
+class Context(object):
+ """The context is a special internal object that holds state relevant
+ for the script execution at every single level. It's normally invisible
+ to commands unless they opt-in to getting access to it.
+
+ The context is useful as it can pass internal objects around and can
+ control special execution features such as reading data from
+ environment variables.
+
+ A context can be used as context manager in which case it will call
+ :meth:`close` on teardown.
+
+ .. versionadded:: 2.0
+ Added the `resilient_parsing`, `help_option_names`,
+ `token_normalize_func` parameters.
+
+ .. versionadded:: 3.0
+ Added the `allow_extra_args` and `allow_interspersed_args`
+ parameters.
+
+ .. versionadded:: 4.0
+ Added the `color`, `ignore_unknown_options`, and
+ `max_content_width` parameters.
+
+ :param command: the command class for this context.
+ :param parent: the parent context.
+ :param info_name: the info name for this invocation. Generally this
+ is the most descriptive name for the script or
+ command. For the toplevel script it is usually
+ the name of the script, for commands below it it's
+ the name of the command.
+ :param obj: an arbitrary object of user data.
+ :param auto_envvar_prefix: the prefix to use for automatic environment
+ variables. If this is `None` then reading
+ from environment variables is disabled. This
+ does not affect manually set environment
+ variables which are always read.
+ :param default_map: a dictionary (like object) with default values
+ for parameters.
+ :param terminal_width: the width of the terminal. The default is
+ to inherit from the parent context. If no context
+ defines the terminal width then auto
+ detection will be applied.
+ :param max_content_width: the maximum width for content rendered by
+ Click (this currently only affects help
+ pages). This defaults to 80 characters if
+ not overridden. In other words: even if the
+ terminal is larger than that, Click will not
+ format things wider than 80 characters by
+ default. In addition to that, formatters might
+ add some safety margin on the right.
+ :param resilient_parsing: if this flag is enabled then Click will
+ parse without any interactivity or callback
+ invocation. This is useful for implementing
+ things such as completion support.
+ :param allow_extra_args: if this is set to `True` then extra arguments
+ at the end will not raise an error and will be
+ kept on the context. The default is to inherit
+ from the command.
+ :param allow_interspersed_args: if this is set to `False` then options
+ and arguments cannot be mixed. The
+ default is to inherit from the command.
+ :param ignore_unknown_options: instructs click to ignore options it does
+ not know and keeps them for later
+ processing.
+ :param help_option_names: optionally a list of strings that define how
+ the default help parameter is named. The
+ default is ``['--help']``.
+ :param token_normalize_func: an optional function that is used to
+ normalize tokens (options, choices,
+ etc.). This for instance can be used to
+ implement case insensitive behavior.
+ :param color: controls if the terminal supports ANSI colors or not. The
+ default is autodetection. This is only needed if ANSI
+ codes are used in texts that Click prints which is by
+ default not the case. This for instance would affect
+ help output.
+ """
+
+ def __init__(self, command, parent=None, info_name=None, obj=None,
+ auto_envvar_prefix=None, default_map=None,
+ terminal_width=None, max_content_width=None,
+ resilient_parsing=False, allow_extra_args=None,
+ allow_interspersed_args=None,
+ ignore_unknown_options=None, help_option_names=None,
+ token_normalize_func=None, color=None):
+ #: the parent context or `None` if none exists.
+ self.parent = parent
+ #: the :class:`Command` for this context.
+ self.command = command
+ #: the descriptive information name
+ self.info_name = info_name
+ #: the parsed parameters except if the value is hidden in which
+ #: case it's not remembered.
+ self.params = {}
+ #: the leftover arguments.
+ self.args = []
+ #: protected arguments. These are arguments that are prepended
+ #: to `args` when certain parsing scenarios are encountered but
+ #: must never be propagated to other arguments. This is used
+ #: to implement nested parsing.
+ self.protected_args = []
+ if obj is None and parent is not None:
+ obj = parent.obj
+ #: the user object stored.
+ self.obj = obj
+ self._meta = getattr(parent, 'meta', {})
+
+ #: A dictionary (-like object) with defaults for parameters.
+ if default_map is None \
+ and parent is not None \
+ and parent.default_map is not None:
+ default_map = parent.default_map.get(info_name)
+ self.default_map = default_map
+
+ #: This flag indicates if a subcommand is going to be executed. A
+ #: group callback can use this information to figure out if it's
+ #: being executed directly or because the execution flow passes
+ #: onwards to a subcommand. By default it's None, but it can be
+ #: the name of the subcommand to execute.
+ #:
+ #: If chaining is enabled this will be set to ``'*'`` in case
+ #: any commands are executed. It is however not possible to
+ #: figure out which ones. If you require this knowledge you
+ #: should use a :func:`resultcallback`.
+ self.invoked_subcommand = None
+
+ if terminal_width is None and parent is not None:
+ terminal_width = parent.terminal_width
+ #: The width of the terminal (None is autodetection).
+ self.terminal_width = terminal_width
+
+ if max_content_width is None and parent is not None:
+ max_content_width = parent.max_content_width
+ #: The maximum width of formatted content (None implies a sensible
+ #: default which is 80 for most things).
+ self.max_content_width = max_content_width
+
+ if allow_extra_args is None:
+ allow_extra_args = command.allow_extra_args
+ #: Indicates if the context allows extra args or if it should
+ #: fail on parsing.
+ #:
+ #: .. versionadded:: 3.0
+ self.allow_extra_args = allow_extra_args
+
+ if allow_interspersed_args is None:
+ allow_interspersed_args = command.allow_interspersed_args
+ #: Indicates if the context allows mixing of arguments and
+ #: options or not.
+ #:
+ #: .. versionadded:: 3.0
+ self.allow_interspersed_args = allow_interspersed_args
+
+ if ignore_unknown_options is None:
+ ignore_unknown_options = command.ignore_unknown_options
+ #: Instructs click to ignore options that a command does not
+ #: understand and to store them on the context for later
+ #: processing. This is primarily useful for situations where you
+ #: want to call into external programs. Generally this pattern is
+ #: strongly discouraged because it's not possible to losslessly
+ #: forward all arguments.
+ #:
+ #: .. versionadded:: 4.0
+ self.ignore_unknown_options = ignore_unknown_options
+
+ if help_option_names is None:
+ if parent is not None:
+ help_option_names = parent.help_option_names
+ else:
+ help_option_names = ['--help']
+
+ #: The names for the help options.
+ self.help_option_names = help_option_names
+
+ if token_normalize_func is None and parent is not None:
+ token_normalize_func = parent.token_normalize_func
+
+ #: An optional normalization function for tokens. This is
+ #: options, choices, commands etc.
+ self.token_normalize_func = token_normalize_func
+
+ #: Indicates if resilient parsing is enabled. In that case Click
+ #: will do its best to not cause any failures.
+ self.resilient_parsing = resilient_parsing
+
+ # If there is no envvar prefix yet, but the parent has one and
+ # the command on this level has a name, we can expand the envvar
+ # prefix automatically.
+ if auto_envvar_prefix is None:
+ if parent is not None \
+ and parent.auto_envvar_prefix is not None and \
+ self.info_name is not None:
+ auto_envvar_prefix = '%s_%s' % (parent.auto_envvar_prefix,
+ self.info_name.upper())
+ else:
+ auto_envvar_prefix = auto_envvar_prefix.upper()
+ self.auto_envvar_prefix = auto_envvar_prefix
+
+ if color is None and parent is not None:
+ color = parent.color
+
+ #: Controls if styling output is wanted or not.
+ self.color = color
+
+ self._close_callbacks = []
+ self._depth = 0
+
+ def __enter__(self):
+ self._depth += 1
+ push_context(self)
+ return self
+
+ def __exit__(self, exc_type, exc_value, tb):
+ self._depth -= 1
+ if self._depth == 0:
+ self.close()
+ pop_context()
+
+ @contextmanager
+ def scope(self, cleanup=True):
+ """This helper method can be used with the context object to promote
+ it to the current thread local (see :func:`get_current_context`).
+ The default behavior of this is to invoke the cleanup functions which
+ can be disabled by setting `cleanup` to `False`. The cleanup
+ functions are typically used for things such as closing file handles.
+
+ If the cleanup is intended the context object can also be directly
+ used as a context manager.
+
+ Example usage::
+
+ with ctx.scope():
+ assert get_current_context() is ctx
+
+ This is equivalent::
+
+ with ctx:
+ assert get_current_context() is ctx
+
+ .. versionadded:: 5.0
+
+ :param cleanup: controls if the cleanup functions should be run or
+ not. The default is to run these functions. In
+ some situations the context only wants to be
+ temporarily pushed in which case this can be disabled.
+ Nested pushes automatically defer the cleanup.
+ """
+ if not cleanup:
+ self._depth += 1
+ try:
+ with self as rv:
+ yield rv
+ finally:
+ if not cleanup:
+ self._depth -= 1
+
+ @property
+ def meta(self):
+ """This is a dictionary which is shared with all the contexts
+ that are nested. It exists so that click utilities can store some
+ state here if they need to. It is however the responsibility of
+ that code to manage this dictionary well.
+
+ The keys are supposed to be unique dotted strings. For instance
+ module paths are a good choice for it. What is stored in there is
+ irrelevant for the operation of click. However what is important is
+ that code that places data here adheres to the general semantics of
+ the system.
+
+ Example usage::
+
+ LANG_KEY = __name__ + '.lang'
+
+ def set_language(value):
+ ctx = get_current_context()
+ ctx.meta[LANG_KEY] = value
+
+ def get_language():
+ return get_current_context().meta.get(LANG_KEY, 'en_US')
+
+ .. versionadded:: 5.0
+ """
+ return self._meta
+
+ def make_formatter(self):
+ """Creates the formatter for the help and usage output."""
+ return HelpFormatter(width=self.terminal_width,
+ max_width=self.max_content_width)
+
+ def call_on_close(self, f):
+ """This decorator remembers a function as callback that should be
+ executed when the context tears down. This is most useful to bind
+ resource handling to the script execution. For instance, file objects
+ opened by the :class:`File` type will register their close callbacks
+ here.
+
+ :param f: the function to execute on teardown.
+ """
+ self._close_callbacks.append(f)
+ return f
+
+ def close(self):
+ """Invokes all close callbacks."""
+ for cb in self._close_callbacks:
+ cb()
+ self._close_callbacks = []
+
+ @property
+ def command_path(self):
+ """The computed command path. This is used for the ``usage``
+ information on the help page. It's automatically created by
+ combining the info names of the chain of contexts to the root.
+ """
+ rv = ''
+ if self.info_name is not None:
+ rv = self.info_name
+ if self.parent is not None:
+ rv = self.parent.command_path + ' ' + rv
+ return rv.lstrip()
+
+ def find_root(self):
+ """Finds the outermost context."""
+ node = self
+ while node.parent is not None:
+ node = node.parent
+ return node
+
+ def find_object(self, object_type):
+ """Finds the closest object of a given type."""
+ node = self
+ while node is not None:
+ if isinstance(node.obj, object_type):
+ return node.obj
+ node = node.parent
+
+ def ensure_object(self, object_type):
+ """Like :meth:`find_object` but sets the innermost object to a
+ new instance of `object_type` if it does not exist.
+ """
+ rv = self.find_object(object_type)
+ if rv is None:
+ self.obj = rv = object_type()
+ return rv
+
+ def lookup_default(self, name):
+ """Looks up the default for a parameter name. This by default
+ looks into the :attr:`default_map` if available.
+ """
+ if self.default_map is not None:
+ rv = self.default_map.get(name)
+ if callable(rv):
+ rv = rv()
+ return rv
+
+ def fail(self, message):
+ """Aborts the execution of the program with a specific error
+ message.
+
+ :param message: the error message to fail with.
+ """
+ raise UsageError(message, self)
+
+ def abort(self):
+ """Aborts the script."""
+ raise Abort()
+
+ def exit(self, code=0):
+ """Exits the application with a given exit code."""
+ sys.exit(code)
+
+ def get_usage(self):
+ """Helper method to get formatted usage string for the current
+ context and command.
+ """
+ return self.command.get_usage(self)
+
+ def get_help(self):
+ """Helper method to get formatted help page for the current
+ context and command.
+ """
+ return self.command.get_help(self)
+
+ def invoke(*args, **kwargs):
+ """Invokes a command callback in exactly the way it expects. There
+ are two ways to invoke this method:
+
+ 1. the first argument can be a callback and all other arguments and
+ keyword arguments are forwarded directly to the function.
+ 2. the first argument is a click command object. In that case all
+ arguments are forwarded as well but proper click parameters
+ (options and click arguments) must be keyword arguments and Click
+ will fill in defaults.
+
+ Note that before Click 3.2 keyword arguments were not properly filled
+ in against the intention of this code and no context was created. For
+ more information about this change and why it was done in a bugfix
+ release see :ref:`upgrade-to-3.2`.
+ """
+ self, callback = args[:2]
+ ctx = self
+
+ # It's also possible to invoke another command which might or
+ # might not have a callback. In that case we also fill
+ # in defaults and make a new context for this command.
+ if isinstance(callback, Command):
+ other_cmd = callback
+ callback = other_cmd.callback
+ ctx = Context(other_cmd, info_name=other_cmd.name, parent=self)
+ if callback is None:
+ raise TypeError('The given command does not have a '
+ 'callback that can be invoked.')
+
+ for param in other_cmd.params:
+ if param.name not in kwargs and param.expose_value:
+ kwargs[param.name] = param.get_default(ctx)
+
+ args = args[2:]
+ with augment_usage_errors(self):
+ with ctx:
+ return callback(*args, **kwargs)
+
+ def forward(*args, **kwargs):
+ """Similar to :meth:`invoke` but fills in default keyword
+ arguments from the current context if the other command expects
+ it. This cannot invoke callbacks directly, only other commands.
+ """
+ self, cmd = args[:2]
+
+ # It's also possible to invoke another command which might or
+ # might not have a callback.
+ if not isinstance(cmd, Command):
+ raise TypeError('Callback is not a command.')
+
+ for param in self.params:
+ if param not in kwargs:
+ kwargs[param] = self.params[param]
+
+ return self.invoke(cmd, **kwargs)
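+ # Illustrative sketch of the difference between invoke() and forward()
+ # (``other_cmd`` is a hypothetical click.Command, not defined here):
+ #
+ #     @click.command()
+ #     @click.option('--count', default=1)
+ #     @click.pass_context
+ #     def repeat(ctx, count):
+ #         ctx.invoke(other_cmd, count=42)  # pass explicit keyword args
+ #         ctx.forward(other_cmd)           # reuse this context's params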
+
+
+class BaseCommand(object):
+ """The base command implements the minimal API contract of commands.
+ Most code will never use this as it does not implement a lot of useful
+ functionality but it can act as the direct subclass of alternative
+ parsing methods that do not depend on the Click parser.
+
+ For instance, this can be used to bridge Click and other systems like
+ argparse or docopt.
+
+ Because base commands do not implement a lot of the API that other
+ parts of Click take for granted, they are not supported for all
+ operations. For instance, they usually cannot be used with the
+ decorators and they have no built-in callback system.
+
+ .. versionchanged:: 2.0
+ Added the `context_settings` parameter.
+
+ :param name: the name of the command to use unless a group overrides it.
+ :param context_settings: an optional dictionary with defaults that are
+ passed to the context object.
+ """
+ #: the default for the :attr:`Context.allow_extra_args` flag.
+ allow_extra_args = False
+ #: the default for the :attr:`Context.allow_interspersed_args` flag.
+ allow_interspersed_args = True
+ #: the default for the :attr:`Context.ignore_unknown_options` flag.
+ ignore_unknown_options = False
+
+ def __init__(self, name, context_settings=None):
+ #: the name the command thinks it has. Upon registering a command
+ #: on a :class:`Group` the group will default the command name
+ #: with this information. You should instead use the
+ #: :class:`Context`\'s :attr:`~Context.info_name` attribute.
+ self.name = name
+ if context_settings is None:
+ context_settings = {}
+ #: an optional dictionary with defaults passed to the context.
+ self.context_settings = context_settings
+
+ def get_usage(self, ctx):
+ raise NotImplementedError('Base commands cannot get usage')
+
+ def get_help(self, ctx):
+ raise NotImplementedError('Base commands cannot get help')
+
+ def make_context(self, info_name, args, parent=None, **extra):
+ """This function when given an info name and arguments will kick
+ off the parsing and create a new :class:`Context`. It does not
+ invoke the actual command callback though.
+
+ :param info_name: the info name for this invocation. Generally this
+ is the most descriptive name for the script or
+ command. For the toplevel script it's usually
+ the name of the script, for commands below it it's
+ the name of the command.
+ :param args: the arguments to parse as list of strings.
+ :param parent: the parent context if available.
+ :param extra: extra keyword arguments forwarded to the context
+ constructor.
+ """
+ for key, value in iteritems(self.context_settings):
+ if key not in extra:
+ extra[key] = value
+ ctx = Context(self, info_name=info_name, parent=parent, **extra)
+ with ctx.scope(cleanup=False):
+ self.parse_args(ctx, args)
+ return ctx
+
+ def parse_args(self, ctx, args):
+ """Given a context and a list of arguments this creates the parser
+ and parses the arguments, then modifies the context as necessary.
+ This is automatically invoked by :meth:`make_context`.
+ """
+ raise NotImplementedError('Base commands do not know how to parse '
+ 'arguments.')
+
+ def invoke(self, ctx):
+ """Given a context, this invokes the command. The default
+ implementation is raising a not implemented error.
+ """
+ raise NotImplementedError('Base commands are not invokable by default')
+
+ def main(self, args=None, prog_name=None, complete_var=None,
+ standalone_mode=True, **extra):
+ """This is the way to invoke a script with all the bells and
+ whistles as a command line application. This will always terminate
+ the application after a call. If this is not wanted, ``SystemExit``
+ needs to be caught.
+
+ This method is also available by directly calling the instance of
+ a :class:`Command`.
+
+ .. versionadded:: 3.0
+ Added the `standalone_mode` flag to control the standalone mode.
+
+ :param args: the arguments that should be used for parsing. If not
+ provided, ``sys.argv[1:]`` is used.
+ :param prog_name: the program name that should be used. By default
+ the program name is constructed by taking the file
+ name from ``sys.argv[0]``.
+ :param complete_var: the environment variable that controls the
+ bash completion support. The default is
+ ``"__COMPLETE"`` with prog name in
+ uppercase.
+ :param standalone_mode: the default behavior is to invoke the script
+ in standalone mode. Click will then
+ handle exceptions and convert them into
+ error messages and the function will never
+ return but shut down the interpreter. If
+ this is set to `False` they will be
+ propagated to the caller and the return
+ value of this function is the return value
+ of :meth:`invoke`.
+ :param extra: extra keyword arguments are forwarded to the context
+ constructor. See :class:`Context` for more information.
+ """
+ # If we are in Python 3, we will verify that the environment is
+ # sane at this point or reject further execution to avoid a
+ # broken script.
+ if not PY2:
+ _verify_python3_env()
+ else:
+ _check_for_unicode_literals()
+
+ if args is None:
+ args = get_os_args()
+ else:
+ args = list(args)
+
+ if prog_name is None:
+ prog_name = make_str(os.path.basename(
+ sys.argv and sys.argv[0] or __file__))
+
+ # Hook for the Bash completion. This only activates if the Bash
+ # completion is actually enabled, otherwise this is quite a fast
+ # noop.
+ _bashcomplete(self, prog_name, complete_var)
+
+ try:
+ try:
+ with self.make_context(prog_name, args, **extra) as ctx:
+ rv = self.invoke(ctx)
+ if not standalone_mode:
+ return rv
+ ctx.exit()
+ except (EOFError, KeyboardInterrupt):
+ echo(file=sys.stderr)
+ raise Abort()
+ except ClickException as e:
+ if not standalone_mode:
+ raise
+ e.show()
+ sys.exit(e.exit_code)
+ except IOError as e:
+ if e.errno == errno.EPIPE:
+ sys.exit(1)
+ else:
+ raise
+ except Abort:
+ if not standalone_mode:
+ raise
+ echo('Aborted!', file=sys.stderr)
+ sys.exit(1)
+
+ def __call__(self, *args, **kwargs):
+ """Alias for :meth:`main`."""
+ return self.main(*args, **kwargs)
+
+
+class Command(BaseCommand):
+ """Commands are the basic building block of command line interfaces in
+ Click. A basic command handles command line parsing and might dispatch
+ more parsing to commands nested below it.
+
+ .. versionchanged:: 2.0
+ Added the `context_settings` parameter.
+
+ :param name: the name of the command to use unless a group overrides it.
+ :param context_settings: an optional dictionary with defaults that are
+ passed to the context object.
+ :param callback: the callback to invoke. This is optional.
+ :param params: the parameters to register with this command. This can
+ be either :class:`Option` or :class:`Argument` objects.
+ :param help: the help string to use for this command.
+ :param epilog: like the help string but it's printed at the end of the
+ help page after everything else.
+ :param short_help: the short help to use for this command. This is
+ shown on the command listing of the parent command.
+ :param add_help_option: by default each command registers a ``--help``
+ option. This can be disabled by this parameter.
+ """
+
+ def __init__(self, name, context_settings=None, callback=None,
+ params=None, help=None, epilog=None, short_help=None,
+ options_metavar='[OPTIONS]', add_help_option=True):
+ BaseCommand.__init__(self, name, context_settings)
+ #: the callback to execute when the command fires. This might be
+ #: `None` in which case nothing happens.
+ self.callback = callback
+ #: the list of parameters for this command in the order they
+ #: should show up in the help page and execute. Eager parameters
+ #: will automatically be handled before non eager ones.
+ self.params = params or []
+ self.help = help
+ self.epilog = epilog
+ self.options_metavar = options_metavar
+ if short_help is None and help:
+ short_help = make_default_short_help(help)
+ self.short_help = short_help
+ self.add_help_option = add_help_option
+
+ def get_usage(self, ctx):
+ formatter = ctx.make_formatter()
+ self.format_usage(ctx, formatter)
+ return formatter.getvalue().rstrip('\n')
+
+ def get_params(self, ctx):
+ rv = self.params
+ help_option = self.get_help_option(ctx)
+ if help_option is not None:
+ rv = rv + [help_option]
+ return rv
+
+ def format_usage(self, ctx, formatter):
+ """Writes the usage line into the formatter."""
+ pieces = self.collect_usage_pieces(ctx)
+ formatter.write_usage(ctx.command_path, ' '.join(pieces))
+
+ def collect_usage_pieces(self, ctx):
+ """Returns all the pieces that go into the usage line and returns
+ it as a list of strings.
+ """
+ rv = [self.options_metavar]
+ for param in self.get_params(ctx):
+ rv.extend(param.get_usage_pieces(ctx))
+ return rv
+
+ def get_help_option_names(self, ctx):
+ """Returns the names for the help option."""
+ all_names = set(ctx.help_option_names)
+ for param in self.params:
+ all_names.difference_update(param.opts)
+ all_names.difference_update(param.secondary_opts)
+ return all_names
+
+ def get_help_option(self, ctx):
+ """Returns the help option object."""
+ help_options = self.get_help_option_names(ctx)
+ if not help_options or not self.add_help_option:
+ return
+
+ def show_help(ctx, param, value):
+ if value and not ctx.resilient_parsing:
+ echo(ctx.get_help(), color=ctx.color)
+ ctx.exit()
+ return Option(help_options, is_flag=True,
+ is_eager=True, expose_value=False,
+ callback=show_help,
+ help='Show this message and exit.')
+
+ def make_parser(self, ctx):
+ """Creates the underlying option parser for this command."""
+ parser = OptionParser(ctx)
+ parser.allow_interspersed_args = ctx.allow_interspersed_args
+ parser.ignore_unknown_options = ctx.ignore_unknown_options
+ for param in self.get_params(ctx):
+ param.add_to_parser(parser, ctx)
+ return parser
+
+ def get_help(self, ctx):
+ """Formats the help into a string and returns it. This creates a
+ formatter and will call into :meth:`format_help`.
+ """
+ formatter = ctx.make_formatter()
+ self.format_help(ctx, formatter)
+ return formatter.getvalue().rstrip('\n')
+
+ def format_help(self, ctx, formatter):
+ """Writes the help into the formatter if it exists.
+
+ This calls into the following methods:
+
+ - :meth:`format_usage`
+ - :meth:`format_help_text`
+ - :meth:`format_options`
+ - :meth:`format_epilog`
+ """
+ self.format_usage(ctx, formatter)
+ self.format_help_text(ctx, formatter)
+ self.format_options(ctx, formatter)
+ self.format_epilog(ctx, formatter)
+
+ def format_help_text(self, ctx, formatter):
+ """Writes the help text to the formatter if it exists."""
+ if self.help:
+ formatter.write_paragraph()
+ with formatter.indentation():
+ formatter.write_text(self.help)
+
+ def format_options(self, ctx, formatter):
+ """Writes all the options into the formatter if they exist."""
+ opts = []
+ for param in self.get_params(ctx):
+ rv = param.get_help_record(ctx)
+ if rv is not None:
+ opts.append(rv)
+
+ if opts:
+ with formatter.section('Options'):
+ formatter.write_dl(opts)
+
+ def format_epilog(self, ctx, formatter):
+ """Writes the epilog into the formatter if it exists."""
+ if self.epilog:
+ formatter.write_paragraph()
+ with formatter.indentation():
+ formatter.write_text(self.epilog)
+
+ def parse_args(self, ctx, args):
+ parser = self.make_parser(ctx)
+ opts, args, param_order = parser.parse_args(args=args)
+
+ for param in iter_params_for_processing(
+ param_order, self.get_params(ctx)):
+ value, args = param.handle_parse_result(ctx, opts, args)
+
+ if args and not ctx.allow_extra_args and not ctx.resilient_parsing:
+ ctx.fail('Got unexpected extra argument%s (%s)'
+ % (len(args) != 1 and 's' or '',
+ ' '.join(map(make_str, args))))
+
+ ctx.args = args
+ return args
+
+ def invoke(self, ctx):
+ """Given a context, this invokes the attached callback (if it exists)
+ in the right way.
+ """
+ if self.callback is not None:
+ return ctx.invoke(self.callback, **ctx.params)
+
+
+class MultiCommand(Command):
+ """A multi command is the basic implementation of a command that
+ dispatches to subcommands. The most common version is the
+ :class:`Group`.
+
+ :param invoke_without_command: this controls how the multi command itself
+ is invoked. By default it's only invoked
+ if a subcommand is provided.
+ :param no_args_is_help: this controls what happens if no arguments are
+ provided. This option is enabled by default if
+ `invoke_without_command` is disabled or disabled
+ if it's enabled. If enabled this will add
+ ``--help`` as argument if no arguments are
+ passed.
+ :param subcommand_metavar: the string that is used in the documentation
+ to indicate the subcommand place.
+ :param chain: if this is set to `True` chaining of multiple subcommands
+ is enabled. This restricts the form of commands in that
+ they cannot have optional arguments but it allows
+ multiple commands to be chained together.
+ :param result_callback: the result callback to attach to this multi
+ command.
+ """
+ allow_extra_args = True
+ allow_interspersed_args = False
+
+ def __init__(self, name=None, invoke_without_command=False,
+ no_args_is_help=None, subcommand_metavar=None,
+ chain=False, result_callback=None, **attrs):
+ Command.__init__(self, name, **attrs)
+ if no_args_is_help is None:
+ no_args_is_help = not invoke_without_command
+ self.no_args_is_help = no_args_is_help
+ self.invoke_without_command = invoke_without_command
+ if subcommand_metavar is None:
+ if chain:
+ subcommand_metavar = SUBCOMMANDS_METAVAR
+ else:
+ subcommand_metavar = SUBCOMMAND_METAVAR
+ self.subcommand_metavar = subcommand_metavar
+ self.chain = chain
+ #: The result callback that is stored. This can be set or
+ #: overridden with the :func:`resultcallback` decorator.
+ self.result_callback = result_callback
+
+ if self.chain:
+ for param in self.params:
+ if isinstance(param, Argument) and not param.required:
+ raise RuntimeError('Multi commands in chain mode cannot '
+ 'have optional arguments.')
+
+ def collect_usage_pieces(self, ctx):
+ rv = Command.collect_usage_pieces(self, ctx)
+ rv.append(self.subcommand_metavar)
+ return rv
+
+ def format_options(self, ctx, formatter):
+ Command.format_options(self, ctx, formatter)
+ self.format_commands(ctx, formatter)
+
+ def resultcallback(self, replace=False):
+ """Adds a result callback to the chain command. By default if a
+ result callback is already registered this will chain them but
+ this can be disabled with the `replace` parameter. The result
+ callback is invoked with the return value of the subcommand
+ (or the list of return values from all subcommands if chaining
+ is enabled) as well as the parameters as they would be passed
+ to the main callback.
+
+ Example::
+
+ @click.group()
+ @click.option('-i', '--input', default=23)
+ def cli(input):
+ return 42
+
+ @cli.resultcallback()
+ def process_result(result, input):
+ return result + input
+
+ .. versionadded:: 3.0
+
+ :param replace: if set to `True` an already existing result
+ callback will be removed.
+ """
+ def decorator(f):
+ old_callback = self.result_callback
+ if old_callback is None or replace:
+ self.result_callback = f
+ return f
+ def function(__value, *args, **kwargs):
+ return f(old_callback(__value, *args, **kwargs),
+ *args, **kwargs)
+ self.result_callback = rv = update_wrapper(function, f)
+ return rv
+ return decorator
+
+ def format_commands(self, ctx, formatter):
+ """Extra format methods for multi methods that adds all the commands
+ after the options.
+ """
+ rows = []
+ for subcommand in self.list_commands(ctx):
+ cmd = self.get_command(ctx, subcommand)
+ # What is this, the tool lied about a command. Ignore it
+ if cmd is None:
+ continue
+
+ help = cmd.short_help or ''
+ rows.append((subcommand, help))
+
+ if rows:
+ with formatter.section('Commands'):
+ formatter.write_dl(rows)
+
+ def parse_args(self, ctx, args):
+ if not args and self.no_args_is_help and not ctx.resilient_parsing:
+ echo(ctx.get_help(), color=ctx.color)
+ ctx.exit()
+
+ rest = Command.parse_args(self, ctx, args)
+ if self.chain:
+ ctx.protected_args = rest
+ ctx.args = []
+ elif rest:
+ ctx.protected_args, ctx.args = rest[:1], rest[1:]
+
+ return ctx.args
+
+ def invoke(self, ctx):
+ def _process_result(value):
+ if self.result_callback is not None:
+ value = ctx.invoke(self.result_callback, value,
+ **ctx.params)
+ return value
+
+ if not ctx.protected_args:
+ # If we are invoked without command the chain flag controls
+ # how this happens. If we are not in chain mode, the return
+ # value here is the return value of the command.
+ # If however we are in chain mode, the return value is the
+ # return value of the result processor invoked with an empty
+ # list (which means that no subcommand actually was executed).
+ if self.invoke_without_command:
+ if not self.chain:
+ return Command.invoke(self, ctx)
+ with ctx:
+ Command.invoke(self, ctx)
+ return _process_result([])
+ ctx.fail('Missing command.')
+
+ # Fetch args back out
+ args = ctx.protected_args + ctx.args
+ ctx.args = []
+ ctx.protected_args = []
+
+ # If we're not in chain mode, we only allow the invocation of a
+ # single command but we also inform the current context about the
+ # name of the command to invoke.
+ if not self.chain:
+ # Make sure the context is entered so we do not clean up
+ # resources until the result processor has worked.
+ with ctx:
+ cmd_name, cmd, args = self.resolve_command(ctx, args)
+ ctx.invoked_subcommand = cmd_name
+ Command.invoke(self, ctx)
+ sub_ctx = cmd.make_context(cmd_name, args, parent=ctx)
+ with sub_ctx:
+ return _process_result(sub_ctx.command.invoke(sub_ctx))
+
+ # In chain mode we create the contexts step by step, but after the
+ # base command has been invoked. Because at that point we do not
+ # know the subcommands yet, the invoked subcommand attribute is
+ # set to ``*`` to inform the command that subcommands are executed
+ # but nothing else.
+ with ctx:
+ ctx.invoked_subcommand = args and '*' or None
+ Command.invoke(self, ctx)
+
+ # Otherwise we make every single context and invoke them in a
+ # chain. In that case the return value to the result processor
+ # is the list of all invoked subcommand's results.
+ contexts = []
+ while args:
+ cmd_name, cmd, args = self.resolve_command(ctx, args)
+ sub_ctx = cmd.make_context(cmd_name, args, parent=ctx,
+ allow_extra_args=True,
+ allow_interspersed_args=False)
+ contexts.append(sub_ctx)
+ args, sub_ctx.args = sub_ctx.args, []
+
+ rv = []
+ for sub_ctx in contexts:
+ with sub_ctx:
+ rv.append(sub_ctx.command.invoke(sub_ctx))
+ return _process_result(rv)
+
+ def resolve_command(self, ctx, args):
+ cmd_name = make_str(args[0])
+ original_cmd_name = cmd_name
+
+ # Get the command
+ cmd = self.get_command(ctx, cmd_name)
+
+ # If we can't find the command but there is a normalization
+ # function available, we try with that one.
+ if cmd is None and ctx.token_normalize_func is not None:
+ cmd_name = ctx.token_normalize_func(cmd_name)
+ cmd = self.get_command(ctx, cmd_name)
+
+ # If we don't find the command we want to show an error message
+ # to the user that it was not provided. However, there is
+ # something else we should do: if the first argument looks like
+ # an option we want to kick off parsing again for arguments to
+ # resolve things like --help which now should go to the main
+ # place.
+ if cmd is None:
+ if split_opt(cmd_name)[0]:
+ self.parse_args(ctx, ctx.args)
+ ctx.fail('No such command "%s".' % original_cmd_name)
+
+ return cmd_name, cmd, args[1:]
+
+ def get_command(self, ctx, cmd_name):
+ """Given a context and a command name, this returns a
+ :class:`Command` object if it exists or returns `None`.
+ """
+ raise NotImplementedError()
+
+ def list_commands(self, ctx):
+ """Returns a list of subcommand names in the order they should
+ appear.
+ """
+ return []
+
+
+class Group(MultiCommand):
+ """A group allows a command to have subcommands attached. This is the
+ most common way to implement nesting in Click.
+
+ :param commands: a dictionary of commands.
+ """
+
+ def __init__(self, name=None, commands=None, **attrs):
+ MultiCommand.__init__(self, name, **attrs)
+ #: the registered subcommands by their exported names.
+ self.commands = commands or {}
+
+ def add_command(self, cmd, name=None):
+ """Registers another :class:`Command` with this group. If the name
+ is not provided, the name of the command is used.
+ """
+ name = name or cmd.name
+ if name is None:
+ raise TypeError('Command has no name.')
+ _check_multicommand(self, name, cmd, register=True)
+ self.commands[name] = cmd
+
+ def command(self, *args, **kwargs):
+ """A shortcut decorator for declaring and attaching a command to
+ the group. This takes the same arguments as :func:`command` but
+ immediately registers the created command with this instance by
+ calling into :meth:`add_command`.
+ """
+ def decorator(f):
+ cmd = command(*args, **kwargs)(f)
+ self.add_command(cmd)
+ return cmd
+ return decorator
+
+ def group(self, *args, **kwargs):
+ """A shortcut decorator for declaring and attaching a group to
+ the group. This takes the same arguments as :func:`group` but
+ immediately registers the created command with this instance by
+ calling into :meth:`add_command`.
+ """
+ def decorator(f):
+ cmd = group(*args, **kwargs)(f)
+ self.add_command(cmd)
+ return cmd
+ return decorator
+
+ def get_command(self, ctx, cmd_name):
+ return self.commands.get(cmd_name)
+
+ def list_commands(self, ctx):
+ return sorted(self.commands)
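+ # A minimal usage sketch for Group via the public ``click.group`` and
+ # ``Group.command`` decorators (the command names are hypothetical):
+ #
+ #     @click.group()
+ #     def cli():
+ #         """Top level group."""
+ #
+ #     @cli.command()
+ #     def init():
+ #         """Registered through Group.command()/add_command()."""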
+
+
+class CommandCollection(MultiCommand):
+ """A command collection is a multi command that merges multiple multi
+ commands together into one. This is a straightforward implementation
+ that accepts a list of different multi commands as sources and
+ provides all the commands for each of them.
+ """
+
+ def __init__(self, name=None, sources=None, **attrs):
+ MultiCommand.__init__(self, name, **attrs)
+ #: The list of registered multi commands.
+ self.sources = sources or []
+
+ def add_source(self, multi_cmd):
+ """Adds a new multi command to the chain dispatcher."""
+ self.sources.append(multi_cmd)
+
+ def get_command(self, ctx, cmd_name):
+ for source in self.sources:
+ rv = source.get_command(ctx, cmd_name)
+ if rv is not None:
+ if self.chain:
+ _check_multicommand(self, cmd_name, rv)
+ return rv
+
+ def list_commands(self, ctx):
+ rv = set()
+ for source in self.sources:
+ rv.update(source.list_commands(ctx))
+ return sorted(rv)
+
+
+class Parameter(object):
+ """A parameter to a command comes in two versions: they are either
+ :class:`Option`\s or :class:`Argument`\s. Other subclasses are currently
+ not supported by design as some of the internals for parsing are
+ intentionally not finalized.
+
+ Some settings are supported by both options and arguments.
+
+ .. versionchanged:: 2.0
+ Changed signature for parameter callback to also be passed the
+ parameter. In Click 2.0, the old callback format will still work,
+ but it will raise a warning to give you a chance to migrate the
+ code more easily.
+
+ :param param_decls: the parameter declarations for this option or
+ argument. This is a list of flags or argument
+ names.
+ :param type: the type that should be used. Either a :class:`ParamType`
+ or a Python type. The latter is converted into the former
+ automatically if supported.
+ :param required: controls if this is optional or not.
+ :param default: the default value if omitted. This can also be a callable,
+ in which case it's invoked when the default is needed
+ without any arguments.
+ :param callback: a callback that should be executed after the parameter
+ was matched. This is called as ``fn(ctx, param,
+ value)`` and needs to return the value. Before Click
+ 2.0, the signature was ``(ctx, value)``.
+ :param nargs: the number of arguments to match. If not ``1`` the return
+ value is a tuple instead of single value. The default for
+ nargs is ``1`` (except if the type is a tuple, then it's
+ the arity of the tuple).
+ :param metavar: how the value is represented in the help page.
+ :param expose_value: if this is `True` then the value is passed onwards
+ to the command callback and stored on the context,
+ otherwise it's skipped.
+ :param is_eager: eager values are processed before non eager ones. This
+ should not be set for arguments or it will inverse the
+ order of processing.
+ :param envvar: a string or list of strings that are environment variables
+ that should be checked.
+ """
+ param_type_name = 'parameter'
+
+ def __init__(self, param_decls=None, type=None, required=False,
+ default=None, callback=None, nargs=None, metavar=None,
+ expose_value=True, is_eager=False, envvar=None):
+ self.name, self.opts, self.secondary_opts = \
+ self._parse_decls(param_decls or (), expose_value)
+
+ self.type = convert_type(type, default)
+
+ # Default nargs to what the type tells us if we have that
+ # information available.
+ if nargs is None:
+ if self.type.is_composite:
+ nargs = self.type.arity
+ else:
+ nargs = 1
+
+ self.required = required
+ self.callback = callback
+ self.nargs = nargs
+ self.multiple = False
+ self.expose_value = expose_value
+ self.default = default
+ self.is_eager = is_eager
+ self.metavar = metavar
+ self.envvar = envvar
+
+ @property
+ def human_readable_name(self):
+ """Returns the human readable name of this parameter. This is the
+ same as the name for options, but the metavar for arguments.
+ """
+ return self.name
+
+ def make_metavar(self):
+ if self.metavar is not None:
+ return self.metavar
+ metavar = self.type.get_metavar(self)
+ if metavar is None:
+ metavar = self.type.name.upper()
+ if self.nargs != 1:
+ metavar += '...'
+ return metavar
+
+ def get_default(self, ctx):
+ """Given a context variable this calculates the default value."""
+ # Otherwise go with the regular default.
+ if callable(self.default):
+ rv = self.default()
+ else:
+ rv = self.default
+ return self.type_cast_value(ctx, rv)
+
+ def add_to_parser(self, parser, ctx):
+ pass
+
+ def consume_value(self, ctx, opts):
+ value = opts.get(self.name)
+ if value is None:
+ value = ctx.lookup_default(self.name)
+ if value is None:
+ value = self.value_from_envvar(ctx)
+ return value
+
+ def type_cast_value(self, ctx, value):
+ """Given a value this runs it properly through the type system.
+ This automatically handles things like `nargs` and `multiple` as
+ well as composite types.
+ """
+ if self.type.is_composite:
+ if self.nargs <= 1:
+ raise TypeError('Attempted to invoke composite type '
+ 'but nargs has been set to %s. This is '
+ 'not supported; nargs needs to be set to '
+ 'a fixed value > 1.' % self.nargs)
+ if self.multiple:
+ return tuple(self.type(x or (), self, ctx) for x in value or ())
+ return self.type(value or (), self, ctx)
+
+ def _convert(value, level):
+ if level == 0:
+ return self.type(value, self, ctx)
+ return tuple(_convert(x, level - 1) for x in value or ())
+ return _convert(value, (self.nargs != 1) + bool(self.multiple))
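+ # Sketch of the nesting handled above (hypothetical values): for an
+ # option with nargs=2 and multiple=True, ``value`` arrives as something
+ # like (('1', '2'), ('3', '4')); the level is 2, so the conversion
+ # recurses twice and only the innermost strings go through ``self.type``.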
+
+ def process_value(self, ctx, value):
+ """Given a value and context this runs the logic to convert the
+ value as necessary.
+ """
+ # If the value we were given is None we do nothing. This way
+ # code that calls this can easily figure out if something was
+ # not provided. Otherwise it would be converted into an empty
+ # tuple for multiple invocations which is inconvenient.
+ if value is not None:
+ return self.type_cast_value(ctx, value)
+
+ def value_is_missing(self, value):
+ if value is None:
+ return True
+ if (self.nargs != 1 or self.multiple) and value == ():
+ return True
+ return False
+
+ def full_process_value(self, ctx, value):
+ value = self.process_value(ctx, value)
+
+ if value is None:
+ value = self.get_default(ctx)
+
+ if self.required and self.value_is_missing(value):
+ raise MissingParameter(ctx=ctx, param=self)
+
+ return value
+
+ def resolve_envvar_value(self, ctx):
+ if self.envvar is None:
+ return
+ if isinstance(self.envvar, (tuple, list)):
+ for envvar in self.envvar:
+ rv = os.environ.get(envvar)
+ if rv is not None:
+ return rv
+ else:
+ return os.environ.get(self.envvar)
+
+ def value_from_envvar(self, ctx):
+ rv = self.resolve_envvar_value(ctx)
+ if rv is not None and self.nargs != 1:
+ rv = self.type.split_envvar_value(rv)
+ return rv
+
+ def handle_parse_result(self, ctx, opts, args):
+ with augment_usage_errors(ctx, param=self):
+ value = self.consume_value(ctx, opts)
+ try:
+ value = self.full_process_value(ctx, value)
+ except Exception:
+ if not ctx.resilient_parsing:
+ raise
+ value = None
+ if self.callback is not None:
+ try:
+ value = invoke_param_callback(
+ self.callback, ctx, self, value)
+ except Exception:
+ if not ctx.resilient_parsing:
+ raise
+
+ if self.expose_value:
+ ctx.params[self.name] = value
+ return value, args
+
+ def get_help_record(self, ctx):
+ pass
+
+ def get_usage_pieces(self, ctx):
+ return []
+
+
+class Option(Parameter):
+ """Options are usually optional values on the command line and
+ have some extra features that arguments don't have.
+
+ All other parameters are passed onwards to the parameter constructor.
+
+ :param show_default: controls if the default value should be shown on the
+ help page. Normally, defaults are not shown.
+ :param prompt: if set to `True` or a non empty string then the user will
+ be prompted for input if not set. If set to `True` the
+ prompt will be the option name capitalized.
+ :param confirmation_prompt: if set then the value will need to be confirmed
+ if it was prompted for.
+ :param hide_input: if this is `True` then the input on the prompt will be
+ hidden from the user. This is useful for password
+ input.
+ :param is_flag: forces this option to act as a flag. The default is
+ auto detection.
+ :param flag_value: which value should be used for this flag if it's
+ enabled. This is set to a boolean automatically if
+ the option string contains a slash to mark two options.
+ :param multiple: if this is set to `True` then the argument is accepted
+ multiple times and recorded. This is similar to ``nargs``
+ in how it works but supports arbitrary number of
+ arguments.
+ :param count: this flag makes an option increment an integer.
+ :param allow_from_autoenv: if this is enabled then the value of this
+ parameter will be pulled from an environment
+ variable in case a prefix is defined on the
+ context.
+ :param help: the help string.
+ """
+ param_type_name = 'option'
+
+ def __init__(self, param_decls=None, show_default=False,
+ prompt=False, confirmation_prompt=False,
+ hide_input=False, is_flag=None, flag_value=None,
+ multiple=False, count=False, allow_from_autoenv=True,
+ type=None, help=None, **attrs):
+ default_is_missing = attrs.get('default', _missing) is _missing
+ Parameter.__init__(self, param_decls, type=type, **attrs)
+
+ if prompt is True:
+ prompt_text = self.name.replace('_', ' ').capitalize()
+ elif prompt is False:
+ prompt_text = None
+ else:
+ prompt_text = prompt
+ self.prompt = prompt_text
+ self.confirmation_prompt = confirmation_prompt
+ self.hide_input = hide_input
+
+ # Flags
+ if is_flag is None:
+ if flag_value is not None:
+ is_flag = True
+ else:
+ is_flag = bool(self.secondary_opts)
+ if is_flag and default_is_missing:
+ self.default = False
+ if flag_value is None:
+ flag_value = not self.default
+ self.is_flag = is_flag
+ self.flag_value = flag_value
+ if self.is_flag and isinstance(self.flag_value, bool) \
+ and type is None:
+ self.type = BOOL
+ self.is_bool_flag = True
+ else:
+ self.is_bool_flag = False
+
+ # Counting
+ self.count = count
+ if count:
+ if type is None:
+ self.type = IntRange(min=0)
+ if default_is_missing:
+ self.default = 0
+
+ self.multiple = multiple
+ self.allow_from_autoenv = allow_from_autoenv
+ self.help = help
+ self.show_default = show_default
+
+ # Sanity check for stuff we don't support
+ if __debug__:
+ if self.nargs < 0:
+ raise TypeError('Options cannot have nargs < 0')
+ if self.prompt and self.is_flag and not self.is_bool_flag:
+ raise TypeError('Cannot prompt for flags that are not bools.')
+ if not self.is_bool_flag and self.secondary_opts:
+ raise TypeError('Got secondary option for non boolean flag.')
+ if self.is_bool_flag and self.hide_input \
+ and self.prompt is not None:
+ raise TypeError('Hidden input does not work with boolean '
+ 'flag prompts.')
+ if self.count:
+ if self.multiple:
+ raise TypeError('Options cannot be multiple and count '
+ 'at the same time.')
+ elif self.is_flag:
+ raise TypeError('Options cannot be count and flags at '
+ 'the same time.')
+
+ def _parse_decls(self, decls, expose_value):
+ opts = []
+ secondary_opts = []
+ name = None
+ possible_names = []
+
+ for decl in decls:
+ if isidentifier(decl):
+ if name is not None:
+ raise TypeError('Name defined twice')
+ name = decl
+ else:
+ split_char = decl[:1] == '/' and ';' or '/'
+ if split_char in decl:
+ first, second = decl.split(split_char, 1)
+ first = first.rstrip()
+ if first:
+ possible_names.append(split_opt(first))
+ opts.append(first)
+ second = second.lstrip()
+ if second:
+ secondary_opts.append(second.lstrip())
+ else:
+ possible_names.append(split_opt(decl))
+ opts.append(decl)
+
+ if name is None and possible_names:
+ possible_names.sort(key=lambda x: len(x[0]))
+ name = possible_names[-1][1].replace('-', '_').lower()
+ if not isidentifier(name):
+ name = None
+
+ if name is None:
+ if not expose_value:
+ return None, opts, secondary_opts
+ raise TypeError('Could not determine name for option')
+
+ if not opts and not secondary_opts:
+ raise TypeError('No options defined but a name was passed (%s). '
+ 'Did you mean to declare an argument instead '
+ 'of an option?' % name)
+
+ return name, opts, secondary_opts
+
+ def add_to_parser(self, parser, ctx):
+ kwargs = {
+ 'dest': self.name,
+ 'nargs': self.nargs,
+ 'obj': self,
+ }
+
+ if self.multiple:
+ action = 'append'
+ elif self.count:
+ action = 'count'
+ else:
+ action = 'store'
+
+ if self.is_flag:
+ kwargs.pop('nargs', None)
+ if self.is_bool_flag and self.secondary_opts:
+ parser.add_option(self.opts, action=action + '_const',
+ const=True, **kwargs)
+ parser.add_option(self.secondary_opts, action=action +
+ '_const', const=False, **kwargs)
+ else:
+ parser.add_option(self.opts, action=action + '_const',
+ const=self.flag_value,
+ **kwargs)
+ else:
+ kwargs['action'] = action
+ parser.add_option(self.opts, **kwargs)
+
+ def get_help_record(self, ctx):
+ any_prefix_is_slash = []
+
+ def _write_opts(opts):
+ rv, any_slashes = join_options(opts)
+ if any_slashes:
+ any_prefix_is_slash[:] = [True]
+ if not self.is_flag and not self.count:
+ rv += ' ' + self.make_metavar()
+ return rv
+
+ rv = [_write_opts(self.opts)]
+ if self.secondary_opts:
+ rv.append(_write_opts(self.secondary_opts))
+
+ help = self.help or ''
+ extra = []
+ if self.default is not None and self.show_default:
+ extra.append('default: %s' % (
+ ', '.join('%s' % d for d in self.default)
+ if isinstance(self.default, (list, tuple))
+ else self.default, ))
+ if self.required:
+ extra.append('required')
+ if extra:
+ help = '%s[%s]' % (help and help + ' ' or '', '; '.join(extra))
+
+ return ((any_prefix_is_slash and '; ' or ' / ').join(rv), help)
+
+ def get_default(self, ctx):
+        # If we're a non-boolean flag, our default is more complex because
+        # we need to look at all flags in the same group to figure out
+        # if we're the default one, in which case we return the flag
+        # value as the default.
+ if self.is_flag and not self.is_bool_flag:
+ for param in ctx.command.params:
+ if param.name == self.name and param.default:
+ return param.flag_value
+ return None
+ return Parameter.get_default(self, ctx)
+
+ def prompt_for_value(self, ctx):
+ """This is an alternative flow that can be activated in the full
+ value processing if a value does not exist. It will prompt the
+ user until a valid value exists and then returns the processed
+ value as result.
+ """
+ # Calculate the default before prompting anything to be stable.
+ default = self.get_default(ctx)
+
+ # If this is a prompt for a flag we need to handle this
+ # differently.
+ if self.is_bool_flag:
+ return confirm(self.prompt, default)
+
+ return prompt(self.prompt, default=default,
+ hide_input=self.hide_input,
+ confirmation_prompt=self.confirmation_prompt,
+ value_proc=lambda x: self.process_value(ctx, x))
+
+ def resolve_envvar_value(self, ctx):
+ rv = Parameter.resolve_envvar_value(self, ctx)
+ if rv is not None:
+ return rv
+ if self.allow_from_autoenv and \
+ ctx.auto_envvar_prefix is not None:
+ envvar = '%s_%s' % (ctx.auto_envvar_prefix, self.name.upper())
+ return os.environ.get(envvar)
+
+ def value_from_envvar(self, ctx):
+ rv = self.resolve_envvar_value(ctx)
+ if rv is None:
+ return None
+ value_depth = (self.nargs != 1) + bool(self.multiple)
+ if value_depth > 0 and rv is not None:
+ rv = self.type.split_envvar_value(rv)
+ if self.multiple and self.nargs != 1:
+ rv = batch(rv, self.nargs)
+ return rv
+
+ def full_process_value(self, ctx, value):
+ if value is None and self.prompt is not None \
+ and not ctx.resilient_parsing:
+ return self.prompt_for_value(ctx)
+ return Parameter.full_process_value(self, ctx, value)
+
+
+class Argument(Parameter):
+ """Arguments are positional parameters to a command. They generally
+ provide fewer features than options but can have infinite ``nargs``
+ and are required by default.
+
+ All parameters are passed onwards to the parameter constructor.
+ """
+ param_type_name = 'argument'
+
+ def __init__(self, param_decls, required=None, **attrs):
+ if required is None:
+ if attrs.get('default') is not None:
+ required = False
+ else:
+ required = attrs.get('nargs', 1) > 0
+ Parameter.__init__(self, param_decls, required=required, **attrs)
+ if self.default is not None and self.nargs < 0:
+ raise TypeError('nargs=-1 in combination with a default value '
+ 'is not supported.')
+
+ @property
+ def human_readable_name(self):
+ if self.metavar is not None:
+ return self.metavar
+ return self.name.upper()
+
+ def make_metavar(self):
+ if self.metavar is not None:
+ return self.metavar
+ var = self.name.upper()
+ if not self.required:
+ var = '[%s]' % var
+ if self.nargs != 1:
+ var += '...'
+ return var
+
+ def _parse_decls(self, decls, expose_value):
+ if not decls:
+ if not expose_value:
+ return None, [], []
+ raise TypeError('Could not determine name for argument')
+ if len(decls) == 1:
+ name = arg = decls[0]
+ name = name.replace('-', '_').lower()
+ elif len(decls) == 2:
+ name, arg = decls
+ else:
+ raise TypeError('Arguments take exactly one or two '
+ 'parameter declarations, got %d' % len(decls))
+ return name, [arg], []
+
+ def get_usage_pieces(self, ctx):
+ return [self.make_metavar()]
+
+ def add_to_parser(self, parser, ctx):
+ parser.add_argument(dest=self.name, nargs=self.nargs,
+ obj=self)
+
+
+# Circular dependency between decorators and core
+from .decorators import command, group
diff --git a/appWSM/lib/python2.7/site-packages/click/decorators.py b/appWSM/lib/python2.7/site-packages/click/decorators.py
new file mode 100644
index 0000000000000000000000000000000000000000..9893452650ba0acd4ff9eef10cb92cf1433089b7
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/click/decorators.py
@@ -0,0 +1,304 @@
+import sys
+import inspect
+
+from functools import update_wrapper
+
+from ._compat import iteritems
+from ._unicodefun import _check_for_unicode_literals
+from .utils import echo
+from .globals import get_current_context
+
+
+def pass_context(f):
+ """Marks a callback as wanting to receive the current context
+ object as first argument.
+ """
+ def new_func(*args, **kwargs):
+ return f(get_current_context(), *args, **kwargs)
+ return update_wrapper(new_func, f)
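+
+# Illustrative sketch (not part of the library): a command whose callback
+# receives the current context as its first argument via ``pass_context``.
+# The ``cli`` name and the ``ctx.obj`` dict are hypothetical.
+#
+#     @click.command()
+#     @click.pass_context
+#     def cli(ctx):
+#         ctx.obj = {'verbose': True}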
+
+
+def pass_obj(f):
+ """Similar to :func:`pass_context`, but only pass the object on the
+ context onwards (:attr:`Context.obj`). This is useful if that object
+ represents the state of a nested system.
+ """
+ def new_func(*args, **kwargs):
+ return f(get_current_context().obj, *args, **kwargs)
+ return update_wrapper(new_func, f)
+
+
+def make_pass_decorator(object_type, ensure=False):
+ """Given an object type this creates a decorator that will work
+ similar to :func:`pass_obj` but instead of passing the object of the
+ current context, it will find the innermost context of type
+ :func:`object_type`.
+
+ This generates a decorator that works roughly like this::
+
+ from functools import update_wrapper
+
+ def decorator(f):
+ @pass_context
+ def new_func(ctx, *args, **kwargs):
+ obj = ctx.find_object(object_type)
+ return ctx.invoke(f, obj, *args, **kwargs)
+ return update_wrapper(new_func, f)
+ return decorator
+
+ :param object_type: the type of the object to pass.
+ :param ensure: if set to `True`, a new object will be created and
+ remembered on the context if it's not there yet.
+ """
+ def decorator(f):
+ def new_func(*args, **kwargs):
+ ctx = get_current_context()
+ if ensure:
+ obj = ctx.ensure_object(object_type)
+ else:
+ obj = ctx.find_object(object_type)
+ if obj is None:
+ raise RuntimeError('Managed to invoke callback without a '
+ 'context object of type %r existing'
+ % object_type.__name__)
+ return ctx.invoke(f, obj, *args[1:], **kwargs)
+ return update_wrapper(new_func, f)
+ return decorator
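+
+# Illustrative sketch (not part of the library): building and using a pass
+# decorator for a hypothetical ``Repo`` object.
+#
+#     pass_repo = click.make_pass_decorator(Repo, ensure=True)
+#
+#     @click.command()
+#     @pass_repo
+#     def show(repo):
+#         click.echo(repo)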
+
+
+def _make_command(f, name, attrs, cls):
+ if isinstance(f, Command):
+ raise TypeError('Attempted to convert a callback into a '
+ 'command twice.')
+ try:
+ params = f.__click_params__
+ params.reverse()
+ del f.__click_params__
+ except AttributeError:
+ params = []
+ help = attrs.get('help')
+ if help is None:
+ help = inspect.getdoc(f)
+ if isinstance(help, bytes):
+ help = help.decode('utf-8')
+ else:
+ help = inspect.cleandoc(help)
+ attrs['help'] = help
+ _check_for_unicode_literals()
+ return cls(name=name or f.__name__.lower(),
+ callback=f, params=params, **attrs)
+
+
+def command(name=None, cls=None, **attrs):
+ """Creates a new :class:`Command` and uses the decorated function as
+ callback. This will also automatically attach all decorated
+ :func:`option`\s and :func:`argument`\s as parameters to the command.
+
+ The name of the command defaults to the name of the function. If you
+ want to change that, you can pass the intended name as the first
+ argument.
+
+ All keyword arguments are forwarded to the underlying command class.
+
+ Once decorated the function turns into a :class:`Command` instance
+ that can be invoked as a command line utility or be attached to a
+ command :class:`Group`.
+
+ :param name: the name of the command. This defaults to the function
+ name.
+ :param cls: the command class to instantiate. This defaults to
+ :class:`Command`.
+ """
+ if cls is None:
+ cls = Command
+ def decorator(f):
+ cmd = _make_command(f, name, attrs, cls)
+ cmd.__doc__ = f.__doc__
+ return cmd
+ return decorator
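+
+# Illustrative sketch (not part of the library): the decorated function
+# becomes a ``Command`` instance that can be run as a command line tool.
+#
+#     @click.command(name='hello')
+#     def hello():
+#         """Prints a greeting."""
+#         click.echo('Hello!')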
+
+
+def group(name=None, **attrs):
+ """Creates a new :class:`Group` with a function as callback. This
+ works otherwise the same as :func:`command` just that the `cls`
+ parameter is set to :class:`Group`.
+ """
+ attrs.setdefault('cls', Group)
+ return command(name, **attrs)
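+
+# Illustrative sketch (not part of the library): a group with one
+# subcommand attached to it.
+#
+#     @click.group()
+#     def cli():
+#         pass
+#
+#     @cli.command()
+#     def init():
+#         click.echo('initialized')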
+
+
+def _param_memo(f, param):
+ if isinstance(f, Command):
+ f.params.append(param)
+ else:
+ if not hasattr(f, '__click_params__'):
+ f.__click_params__ = []
+ f.__click_params__.append(param)
+
+
+def argument(*param_decls, **attrs):
+ """Attaches an argument to the command. All positional arguments are
+ passed as parameter declarations to :class:`Argument`; all keyword
+ arguments are forwarded unchanged (except ``cls``).
+ This is equivalent to creating an :class:`Argument` instance manually
+ and attaching it to the :attr:`Command.params` list.
+
+ :param cls: the argument class to instantiate. This defaults to
+ :class:`Argument`.
+ """
+ def decorator(f):
+ ArgumentClass = attrs.pop('cls', Argument)
+ _param_memo(f, ArgumentClass(param_decls, **attrs))
+ return f
+ return decorator
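+
+# Illustrative sketch (not part of the library): declaring positional
+# arguments; ``src`` may repeat thanks to ``nargs=-1``.
+#
+#     @click.command()
+#     @click.argument('src', nargs=-1)
+#     @click.argument('dst')
+#     def copy(src, dst):
+#         click.echo('%s -> %s' % (src, dst))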
+
+
+def option(*param_decls, **attrs):
+ """Attaches an option to the command. All positional arguments are
+ passed as parameter declarations to :class:`Option`; all keyword
+ arguments are forwarded unchanged (except ``cls``).
+ This is equivalent to creating an :class:`Option` instance manually
+ and attaching it to the :attr:`Command.params` list.
+
+ :param cls: the option class to instantiate. This defaults to
+ :class:`Option`.
+ """
+ def decorator(f):
+ if 'help' in attrs:
+ attrs['help'] = inspect.cleandoc(attrs['help'])
+ OptionClass = attrs.pop('cls', Option)
+ _param_memo(f, OptionClass(param_decls, **attrs))
+ return f
+ return decorator
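+
+# Illustrative sketch (not part of the library): common option styles --
+# a valued option, a boolean flag pair and a counted flag.
+#
+#     @click.command()
+#     @click.option('--name', default='world', help='Who to greet.')
+#     @click.option('--shout/--no-shout', default=False)
+#     @click.option('-v', '--verbose', count=True)
+#     def greet(name, shout, verbose):
+#         click.echo('Hello %s' % name)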
+
+
+def confirmation_option(*param_decls, **attrs):
+ """Shortcut for confirmation prompts that can be ignored by passing
+ ``--yes`` as parameter.
+
+ This is equivalent to decorating a function with :func:`option` with
+ the following parameters::
+
+ def callback(ctx, param, value):
+ if not value:
+ ctx.abort()
+
+ @click.command()
+ @click.option('--yes', is_flag=True, callback=callback,
+ expose_value=False, prompt='Do you want to continue?')
+ def dropdb():
+ pass
+ """
+ def decorator(f):
+ def callback(ctx, param, value):
+ if not value:
+ ctx.abort()
+ attrs.setdefault('is_flag', True)
+ attrs.setdefault('callback', callback)
+ attrs.setdefault('expose_value', False)
+ attrs.setdefault('prompt', 'Do you want to continue?')
+ attrs.setdefault('help', 'Confirm the action without prompting.')
+ return option(*(param_decls or ('--yes',)), **attrs)(f)
+ return decorator
+
+
+def password_option(*param_decls, **attrs):
+ """Shortcut for password prompts.
+
+ This is equivalent to decorating a function with :func:`option` with
+ the following parameters::
+
+ @click.command()
+ @click.option('--password', prompt=True, confirmation_prompt=True,
+ hide_input=True)
+ def changeadmin(password):
+ pass
+ """
+ def decorator(f):
+ attrs.setdefault('prompt', True)
+ attrs.setdefault('confirmation_prompt', True)
+ attrs.setdefault('hide_input', True)
+ return option(*(param_decls or ('--password',)), **attrs)(f)
+ return decorator
+
+
+def version_option(version=None, *param_decls, **attrs):
+ """Adds a ``--version`` option which immediately ends the program
+ printing out the version number. This is implemented as an eager
+ option that prints the version and exits the program in the callback.
+
+ :param version: the version number to show. If not provided Click
+ attempts an auto discovery via setuptools.
+ :param prog_name: the name of the program (defaults to autodetection)
+ :param message: custom message to show instead of the default
+ (``'%(prog)s, version %(version)s'``)
+ :param others: everything else is forwarded to :func:`option`.
+ """
+ if version is None:
+ module = sys._getframe(1).f_globals.get('__name__')
+ def decorator(f):
+ prog_name = attrs.pop('prog_name', None)
+ message = attrs.pop('message', '%(prog)s, version %(version)s')
+
+ def callback(ctx, param, value):
+ if not value or ctx.resilient_parsing:
+ return
+ prog = prog_name
+ if prog is None:
+ prog = ctx.find_root().info_name
+ ver = version
+ if ver is None:
+ try:
+ import pkg_resources
+ except ImportError:
+ pass
+ else:
+ for dist in pkg_resources.working_set:
+ scripts = dist.get_entry_map().get('console_scripts') or {}
+ for script_name, entry_point in iteritems(scripts):
+ if entry_point.module_name == module:
+ ver = dist.version
+ break
+ if ver is None:
+ raise RuntimeError('Could not determine version')
+ echo(message % {
+ 'prog': prog,
+ 'version': ver,
+ }, color=ctx.color)
+ ctx.exit()
+
+ attrs.setdefault('is_flag', True)
+ attrs.setdefault('expose_value', False)
+ attrs.setdefault('is_eager', True)
+ attrs.setdefault('help', 'Show the version and exit.')
+ attrs['callback'] = callback
+ return option(*(param_decls or ('--version',)), **attrs)(f)
+ return decorator
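+
+# Illustrative sketch (not part of the library): passing a fixed version
+# string so no setuptools auto discovery is needed.
+#
+#     @click.command()
+#     @click.version_option(version='1.0.0', prog_name='mytool')
+#     def cli():
+#         pass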
+
+
+def help_option(*param_decls, **attrs):
+ """Adds a ``--help`` option which immediately ends the program
+ printing out the help page. This is usually unnecessary to add as
+ this is added by default to all commands unless suppressed.
+
+ Like :func:`version_option`, this is implemented as eager option that
+ prints in the callback and exits.
+
+ All arguments are forwarded to :func:`option`.
+ """
+ def decorator(f):
+ def callback(ctx, param, value):
+ if value and not ctx.resilient_parsing:
+ echo(ctx.get_help(), color=ctx.color)
+ ctx.exit()
+ attrs.setdefault('is_flag', True)
+ attrs.setdefault('expose_value', False)
+ attrs.setdefault('help', 'Show this message and exit.')
+ attrs.setdefault('is_eager', True)
+ attrs['callback'] = callback
+ return option(*(param_decls or ('--help',)), **attrs)(f)
+ return decorator
+
+
+# Circular dependencies between core and decorators
+from .core import Command, Group, Argument, Option
diff --git a/appWSM/lib/python2.7/site-packages/click/exceptions.py b/appWSM/lib/python2.7/site-packages/click/exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..74a4542bb57f29be1a50412b063f4298448e70c0
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/click/exceptions.py
@@ -0,0 +1,201 @@
+from ._compat import PY2, filename_to_ui, get_text_stderr
+from .utils import echo
+
+
+class ClickException(Exception):
+ """An exception that Click can handle and show to the user."""
+
+ #: The exit code for this exception
+ exit_code = 1
+
+ def __init__(self, message):
+ if PY2:
+ if message is not None:
+ message = message.encode('utf-8')
+ Exception.__init__(self, message)
+ self.message = message
+
+ def format_message(self):
+ return self.message
+
+ def show(self, file=None):
+ if file is None:
+ file = get_text_stderr()
+ echo('Error: %s' % self.format_message(), file=file)
+
+
+class UsageError(ClickException):
+ """An internal exception that signals a usage error. This typically
+ aborts any further handling.
+
+ :param message: the error message to display.
+ :param ctx: optionally the context that caused this error. Click will
+ fill in the context automatically in some situations.
+ """
+ exit_code = 2
+
+ def __init__(self, message, ctx=None):
+ ClickException.__init__(self, message)
+ self.ctx = ctx
+
+ def show(self, file=None):
+ if file is None:
+ file = get_text_stderr()
+ color = None
+ if self.ctx is not None:
+ color = self.ctx.color
+ echo(self.ctx.get_usage() + '\n', file=file, color=color)
+ echo('Error: %s' % self.format_message(), file=file, color=color)
+
+
+class BadParameter(UsageError):
+ """An exception that formats out a standardized error message for a
+ bad parameter. This is useful when thrown from a callback or type as
+ Click will attach contextual information to it (for instance, which
+ parameter it is).
+
+ .. versionadded:: 2.0
+
+ :param param: the parameter object that caused this error. This can
+ be left out, and Click will attach this info itself
+ if possible.
+ :param param_hint: a string that shows up as parameter name. This
+ can be used as alternative to `param` in cases
+ where custom validation should happen. If it is
+ a string it's used as such, if it's a list then
+ each item is quoted and separated.
+ """
+
+ def __init__(self, message, ctx=None, param=None,
+ param_hint=None):
+ UsageError.__init__(self, message, ctx)
+ self.param = param
+ self.param_hint = param_hint
+
+ def format_message(self):
+ if self.param_hint is not None:
+ param_hint = self.param_hint
+ elif self.param is not None:
+ param_hint = self.param.opts or [self.param.human_readable_name]
+ else:
+ return 'Invalid value: %s' % self.message
+ if isinstance(param_hint, (tuple, list)):
+ param_hint = ' / '.join('"%s"' % x for x in param_hint)
+ return 'Invalid value for %s: %s' % (param_hint, self.message)
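+
+# Illustrative sketch (not part of the library): raising BadParameter from
+# a hypothetical option callback so Click reports which parameter failed.
+#
+#     def validate_port(ctx, param, value):
+#         if not (0 < value < 65536):
+#             raise BadParameter('port must be between 1 and 65535')
+#         return value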
+
+
+class MissingParameter(BadParameter):
+ """Raised if click required an option or argument but it was not
+ provided when invoking the script.
+
+ .. versionadded:: 4.0
+
+ :param param_type: a string that indicates the type of the parameter.
+ The default is to inherit the parameter type from
+ the given `param`. Valid values are ``'parameter'``,
+ ``'option'`` or ``'argument'``.
+ """
+
+ def __init__(self, message=None, ctx=None, param=None,
+ param_hint=None, param_type=None):
+ BadParameter.__init__(self, message, ctx, param, param_hint)
+ self.param_type = param_type
+
+ def format_message(self):
+ if self.param_hint is not None:
+ param_hint = self.param_hint
+ elif self.param is not None:
+ param_hint = self.param.opts or [self.param.human_readable_name]
+ else:
+ param_hint = None
+ if isinstance(param_hint, (tuple, list)):
+ param_hint = ' / '.join('"%s"' % x for x in param_hint)
+
+ param_type = self.param_type
+ if param_type is None and self.param is not None:
+ param_type = self.param.param_type_name
+
+ msg = self.message
+ if self.param is not None:
+ msg_extra = self.param.type.get_missing_message(self.param)
+ if msg_extra:
+ if msg:
+ msg += '. ' + msg_extra
+ else:
+ msg = msg_extra
+
+ return 'Missing %s%s%s%s' % (
+ param_type,
+ param_hint and ' %s' % param_hint or '',
+ msg and '. ' or '.',
+ msg or '',
+ )
+
+
+class NoSuchOption(UsageError):
+ """Raised if click attempted to handle an option that does not
+ exist.
+
+ .. versionadded:: 4.0
+ """
+
+ def __init__(self, option_name, message=None, possibilities=None,
+ ctx=None):
+ if message is None:
+ message = 'no such option: %s' % option_name
+ UsageError.__init__(self, message, ctx)
+ self.option_name = option_name
+ self.possibilities = possibilities
+
+ def format_message(self):
+ bits = [self.message]
+ if self.possibilities:
+ if len(self.possibilities) == 1:
+ bits.append('Did you mean %s?' % self.possibilities[0])
+ else:
+ possibilities = sorted(self.possibilities)
+ bits.append('(Possible options: %s)' % ', '.join(possibilities))
+ return ' '.join(bits)
+
+
+class BadOptionUsage(UsageError):
+ """Raised if an option is generally supplied but the use of the option
+ was incorrect. This is for instance raised if the number of arguments
+ for an option is not correct.
+
+ .. versionadded:: 4.0
+ """
+
+ def __init__(self, message, ctx=None):
+ UsageError.__init__(self, message, ctx)
+
+
+class BadArgumentUsage(UsageError):
+ """Raised if an argument is generally supplied but the use of the argument
+ was incorrect. This is for instance raised if the number of values
+ for an argument is not correct.
+
+ .. versionadded:: 6.0
+ """
+
+ def __init__(self, message, ctx=None):
+ UsageError.__init__(self, message, ctx)
+
+
+class FileError(ClickException):
+ """Raised if a file cannot be opened."""
+
+ def __init__(self, filename, hint=None):
+ ui_filename = filename_to_ui(filename)
+ if hint is None:
+ hint = 'unknown error'
+ ClickException.__init__(self, hint)
+ self.ui_filename = ui_filename
+ self.filename = filename
+
+ def format_message(self):
+ return 'Could not open file %s: %s' % (self.ui_filename, self.message)
+
+
+class Abort(RuntimeError):
+ """An internal signalling exception that signals Click to abort."""
diff --git a/appWSM/lib/python2.7/site-packages/click/formatting.py b/appWSM/lib/python2.7/site-packages/click/formatting.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3d6a4d389019c3c5cf61a1ab9be8e6836bd15e6
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/click/formatting.py
@@ -0,0 +1,256 @@
+from contextlib import contextmanager
+from .termui import get_terminal_size
+from .parser import split_opt
+from ._compat import term_len
+
+
+# Can force a width. This is used by the test system
+FORCED_WIDTH = None
+
+
+def measure_table(rows):
+ widths = {}
+ for row in rows:
+ for idx, col in enumerate(row):
+ widths[idx] = max(widths.get(idx, 0), term_len(col))
+ return tuple(y for x, y in sorted(widths.items()))
+
+
+def iter_rows(rows, col_count):
+ for row in rows:
+ row = tuple(row)
+ yield row + ('',) * (col_count - len(row))
+
+
+def wrap_text(text, width=78, initial_indent='', subsequent_indent='',
+ preserve_paragraphs=False):
+ """A helper function that intelligently wraps text. By default, it
+ assumes that it operates on a single paragraph of text but if the
+ `preserve_paragraphs` parameter is provided it will intelligently
+ handle paragraphs (defined by two empty lines).
+
+ If paragraphs are handled, a paragraph can be prefixed with an empty
+ line containing the ``\\b`` character (``\\x08``) to indicate that
+ no rewrapping should happen in that block.
+
+ :param text: the text that should be rewrapped.
+ :param width: the maximum width for the text.
+ :param initial_indent: the initial indent that should be placed on the
+ first line as a string.
+ :param subsequent_indent: the indent string that should be placed on
+ each consecutive line.
+ :param preserve_paragraphs: if this flag is set then the wrapping will
+ intelligently handle paragraphs.
+ """
+ from ._textwrap import TextWrapper
+ text = text.expandtabs()
+ wrapper = TextWrapper(width, initial_indent=initial_indent,
+ subsequent_indent=subsequent_indent,
+ replace_whitespace=False)
+ if not preserve_paragraphs:
+ return wrapper.fill(text)
+
+ p = []
+ buf = []
+ indent = None
+
+ def _flush_par():
+ if not buf:
+ return
+ if buf[0].strip() == '\b':
+ p.append((indent or 0, True, '\n'.join(buf[1:])))
+ else:
+ p.append((indent or 0, False, ' '.join(buf)))
+ del buf[:]
+
+ for line in text.splitlines():
+ if not line:
+ _flush_par()
+ indent = None
+ else:
+ if indent is None:
+ orig_len = term_len(line)
+ line = line.lstrip()
+ indent = orig_len - term_len(line)
+ buf.append(line)
+ _flush_par()
+
+ rv = []
+ for indent, raw, text in p:
+ with wrapper.extra_indent(' ' * indent):
+ if raw:
+ rv.append(wrapper.indent_only(text))
+ else:
+ rv.append(wrapper.fill(text))
+
+ return '\n\n'.join(rv)
+
+
+class HelpFormatter(object):
+ """This class helps with formatting text-based help pages. It's
+ usually just needed for very special internal cases, but it's also
+ exposed so that developers can write their own fancy outputs.
+
+ At present, it always writes into memory.
+
+ :param indent_increment: the additional increment for each level.
+ :param width: the width for the text. This defaults to the terminal
+ width clamped to a maximum of 78.
+ """
+
+ def __init__(self, indent_increment=2, width=None, max_width=None):
+ self.indent_increment = indent_increment
+ if max_width is None:
+ max_width = 80
+ if width is None:
+ width = FORCED_WIDTH
+ if width is None:
+ width = max(min(get_terminal_size()[0], max_width) - 2, 50)
+ self.width = width
+ self.current_indent = 0
+ self.buffer = []
+
+ def write(self, string):
+ """Writes a unicode string into the internal buffer."""
+ self.buffer.append(string)
+
+ def indent(self):
+ """Increases the indentation."""
+ self.current_indent += self.indent_increment
+
+ def dedent(self):
+ """Decreases the indentation."""
+ self.current_indent -= self.indent_increment
+
+ def write_usage(self, prog, args='', prefix='Usage: '):
+ """Writes a usage line into the buffer.
+
+ :param prog: the program name.
+ :param args: whitespace separated list of arguments.
+ :param prefix: the prefix for the first line.
+ """
+ usage_prefix = '%*s%s ' % (self.current_indent, prefix, prog)
+ text_width = self.width - self.current_indent
+
+ if text_width >= (term_len(usage_prefix) + 20):
+ # The arguments will fit to the right of the prefix.
+ indent = ' ' * term_len(usage_prefix)
+ self.write(wrap_text(args, text_width,
+ initial_indent=usage_prefix,
+ subsequent_indent=indent))
+ else:
+ # The prefix is too long, put the arguments on the next line.
+ self.write(usage_prefix)
+ self.write('\n')
+ indent = ' ' * (max(self.current_indent, term_len(prefix)) + 4)
+ self.write(wrap_text(args, text_width,
+ initial_indent=indent,
+ subsequent_indent=indent))
+
+ self.write('\n')
+
+ def write_heading(self, heading):
+ """Writes a heading into the buffer."""
+ self.write('%*s%s:\n' % (self.current_indent, '', heading))
+
+ def write_paragraph(self):
+ """Writes a paragraph into the buffer."""
+ if self.buffer:
+ self.write('\n')
+
+ def write_text(self, text):
+ """Writes re-indented text into the buffer. This rewraps and
+ preserves paragraphs.
+ """
+ text_width = max(self.width - self.current_indent, 11)
+ indent = ' ' * self.current_indent
+ self.write(wrap_text(text, text_width,
+ initial_indent=indent,
+ subsequent_indent=indent,
+ preserve_paragraphs=True))
+ self.write('\n')
+
+ def write_dl(self, rows, col_max=30, col_spacing=2):
+ """Writes a definition list into the buffer. This is how options
+ and commands are usually formatted.
+
+ :param rows: a list of two item tuples for the terms and values.
+ :param col_max: the maximum width of the first column.
+ :param col_spacing: the number of spaces between the first and
+ second column.
+ """
+ rows = list(rows)
+ widths = measure_table(rows)
+ if len(widths) != 2:
+ raise TypeError('Expected two columns for definition list')
+
+ first_col = min(widths[0], col_max) + col_spacing
+
+ for first, second in iter_rows(rows, len(widths)):
+ self.write('%*s%s' % (self.current_indent, '', first))
+ if not second:
+ self.write('\n')
+ continue
+ if term_len(first) <= first_col - col_spacing:
+ self.write(' ' * (first_col - term_len(first)))
+ else:
+ self.write('\n')
+ self.write(' ' * (first_col + self.current_indent))
+
+ text_width = max(self.width - first_col - 2, 10)
+ lines = iter(wrap_text(second, text_width).splitlines())
+ if lines:
+ self.write(next(lines) + '\n')
+ for line in lines:
+ self.write('%*s%s\n' % (
+ first_col + self.current_indent, '', line))
+ else:
+ self.write('\n')
+
+ @contextmanager
+ def section(self, name):
+ """Helpful context manager that writes a paragraph, a heading,
+ and the indents.
+
+ :param name: the section name that is written as heading.
+ """
+ self.write_paragraph()
+ self.write_heading(name)
+ self.indent()
+ try:
+ yield
+ finally:
+ self.dedent()
+
+ @contextmanager
+ def indentation(self):
+ """A context manager that increases the indentation."""
+ self.indent()
+ try:
+ yield
+ finally:
+ self.dedent()
+
+ def getvalue(self):
+ """Returns the buffer contents."""
+ return ''.join(self.buffer)
+
+
+def join_options(options):
+ """Given a list of option strings this joins them in the most appropriate
+ way and returns them in the form ``(formatted_string,
+ any_prefix_is_slash)`` where the second item in the tuple is a flag that
+ indicates if any of the option prefixes was a slash.
+ """
+ rv = []
+ any_prefix_is_slash = False
+ for opt in options:
+ prefix = split_opt(opt)[0]
+ if prefix == '/':
+ any_prefix_is_slash = True
+ rv.append((len(prefix), opt))
+
+ rv.sort(key=lambda x: x[0])
+
+ rv = ', '.join(x[1] for x in rv)
+ return rv, any_prefix_is_slash
diff --git a/appWSM/lib/python2.7/site-packages/click/globals.py b/appWSM/lib/python2.7/site-packages/click/globals.py
new file mode 100644
index 0000000000000000000000000000000000000000..14338e6bb8434b6328830cfa1aaf21acf80a492c
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/click/globals.py
@@ -0,0 +1,48 @@
+from threading import local
+
+
+_local = local()
+
+
+def get_current_context(silent=False):
+ """Returns the current click context. This can be used as a way to
+ access the current context object from anywhere. This is a more implicit
+ alternative to the :func:`pass_context` decorator. This function is
+ primarily useful for helpers such as :func:`echo` which might be
+    interested in changing its behavior based on the current context.
+
+ To push the current context, :meth:`Context.scope` can be used.
+
+ .. versionadded:: 5.0
+
+    :param silent: if set to `True` the return value is `None` if no context
+ is available. The default behavior is to raise a
+ :exc:`RuntimeError`.
+ """
+ try:
+ return getattr(_local, 'stack')[-1]
+ except (AttributeError, IndexError):
+ if not silent:
+ raise RuntimeError('There is no active click context.')
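+
+# Illustrative sketch (not part of the library): reading the active context
+# from a helper function instead of threading it through as an argument.
+#
+#     def current_command_name():
+#         ctx = get_current_context(silent=True)
+#         return ctx.info_name if ctx is not None else None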
+
+
+def push_context(ctx):
+ """Pushes a new context to the current stack."""
+ _local.__dict__.setdefault('stack', []).append(ctx)
+
+
+def pop_context():
+ """Removes the top level from the stack."""
+ _local.stack.pop()
+
+
+def resolve_color_default(color=None):
+    """Internal helper to get the default value of the color flag.  If a
+ value is passed it's returned unchanged, otherwise it's looked up from
+ the current context.
+ """
+ if color is not None:
+ return color
+ ctx = get_current_context(silent=True)
+ if ctx is not None:
+ return ctx.color
diff --git a/appWSM/lib/python2.7/site-packages/click/parser.py b/appWSM/lib/python2.7/site-packages/click/parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..9775c9ff9b99d85ed040e410f5209a342b9dd565
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/click/parser.py
@@ -0,0 +1,426 @@
+# -*- coding: utf-8 -*-
+"""
+ click.parser
+ ~~~~~~~~~~~~
+
+ This module started out as largely a copy paste from the stdlib's
+ optparse module with the features removed that we do not need from
+ optparse because we implement them in Click on a higher level (for
+ instance type handling, help formatting and a lot more).
+
+ The plan is to remove more and more from here over time.
+
+    The reason this is a different module and not optparse from the stdlib
+    is that there are differences between 2.x and 3.x in the error messages
+    generated, and optparse in the stdlib uses gettext for no good reason,
+    which might cause us issues.
+"""
+import re
+from collections import deque
+from .exceptions import UsageError, NoSuchOption, BadOptionUsage, \
+ BadArgumentUsage
+
+
+def _unpack_args(args, nargs_spec):
+ """Given an iterable of arguments and an iterable of nargs specifications,
+ it returns a tuple with all the unpacked arguments at the first index
+ and all remaining arguments as the second.
+
+ The nargs specification is the number of arguments that should be consumed
+ or `-1` to indicate that this position should eat up all the remainders.
+
+ Missing items are filled with `None`.
+ """
+ args = deque(args)
+ nargs_spec = deque(nargs_spec)
+ rv = []
+ spos = None
+
+ def _fetch(c):
+ try:
+ if spos is None:
+ return c.popleft()
+ else:
+ return c.pop()
+ except IndexError:
+ return None
+
+ while nargs_spec:
+ nargs = _fetch(nargs_spec)
+ if nargs == 1:
+ rv.append(_fetch(args))
+ elif nargs > 1:
+ x = [_fetch(args) for _ in range(nargs)]
+ # If we're reversed, we're pulling in the arguments in reverse,
+ # so we need to turn them around.
+ if spos is not None:
+ x.reverse()
+ rv.append(tuple(x))
+ elif nargs < 0:
+ if spos is not None:
+ raise TypeError('Cannot have two nargs < 0')
+ spos = len(rv)
+ rv.append(None)
+
+ # spos is the position of the wildcard (star). If it's not `None`,
+ # we fill it with the remainder.
+ if spos is not None:
+ rv[spos] = tuple(args)
+ args = []
+ rv[spos + 1:] = reversed(rv[spos + 1:])
+
+ return tuple(rv), list(args)
+
+
+def _error_opt_args(nargs, opt):
+ if nargs == 1:
+ raise BadOptionUsage('%s option requires an argument' % opt)
+ raise BadOptionUsage('%s option requires %d arguments' % (opt, nargs))
+
+
+def split_opt(opt):
+ first = opt[:1]
+ if first.isalnum():
+ return '', opt
+ if opt[1:2] == first:
+ return opt[:2], opt[2:]
+ return first, opt[1:]
+
+
+def normalize_opt(opt, ctx):
+ if ctx is None or ctx.token_normalize_func is None:
+ return opt
+ prefix, opt = split_opt(opt)
+ return prefix + ctx.token_normalize_func(opt)
+
+
+def split_arg_string(string):
+ """Given an argument string this attempts to split it into small parts."""
+ rv = []
+ for match in re.finditer(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
+ r'|"([^"\\]*(?:\\.[^"\\]*)*)"'
+ r'|\S+)\s*', string, re.S):
+ arg = match.group().strip()
+ if arg[:1] == arg[-1:] and arg[:1] in '"\'':
+ arg = arg[1:-1].encode('ascii', 'backslashreplace') \
+ .decode('unicode-escape')
+ try:
+ arg = type(string)(arg)
+ except UnicodeError:
+ pass
+ rv.append(arg)
+ return rv
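+
+# Illustrative sketch (not part of the library): quoted chunks are kept
+# together, e.g. split_arg_string('pip install "foo bar"')
+# returns ['pip', 'install', 'foo bar'].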
+
+
+class Option(object):
+
+ def __init__(self, opts, dest, action=None, nargs=1, const=None, obj=None):
+ self._short_opts = []
+ self._long_opts = []
+ self.prefixes = set()
+
+ for opt in opts:
+ prefix, value = split_opt(opt)
+ if not prefix:
+ raise ValueError('Invalid start character for option (%s)'
+ % opt)
+ self.prefixes.add(prefix[0])
+ if len(prefix) == 1 and len(value) == 1:
+ self._short_opts.append(opt)
+ else:
+ self._long_opts.append(opt)
+ self.prefixes.add(prefix)
+
+ if action is None:
+ action = 'store'
+
+ self.dest = dest
+ self.action = action
+ self.nargs = nargs
+ self.const = const
+ self.obj = obj
+
+ @property
+ def takes_value(self):
+ return self.action in ('store', 'append')
+
+ def process(self, value, state):
+ if self.action == 'store':
+ state.opts[self.dest] = value
+ elif self.action == 'store_const':
+ state.opts[self.dest] = self.const
+ elif self.action == 'append':
+ state.opts.setdefault(self.dest, []).append(value)
+ elif self.action == 'append_const':
+ state.opts.setdefault(self.dest, []).append(self.const)
+ elif self.action == 'count':
+ state.opts[self.dest] = state.opts.get(self.dest, 0) + 1
+ else:
+ raise ValueError('unknown action %r' % self.action)
+ state.order.append(self.obj)
+
+
+class Argument(object):
+
+ def __init__(self, dest, nargs=1, obj=None):
+ self.dest = dest
+ self.nargs = nargs
+ self.obj = obj
+
+ def process(self, value, state):
+ if self.nargs > 1:
+ holes = sum(1 for x in value if x is None)
+ if holes == len(value):
+ value = None
+ elif holes != 0:
+ raise BadArgumentUsage('argument %s takes %d values'
+ % (self.dest, self.nargs))
+ state.opts[self.dest] = value
+ state.order.append(self.obj)
+
+
+class ParsingState(object):
+
+ def __init__(self, rargs):
+ self.opts = {}
+ self.largs = []
+ self.rargs = rargs
+ self.order = []
+
+
+class OptionParser(object):
+ """The option parser is an internal class that is ultimately used to
+ parse options and arguments. It's modelled after optparse and brings
+ a similar but vastly simplified API. It should generally not be used
+ directly as the high level Click classes wrap it for you.
+
+ It's not nearly as extensible as optparse or argparse as it does not
+ implement features that are implemented on a higher level (such as
+ types or defaults).
+
+    :param ctx: optionally the :class:`~click.Context` that this parser
+                is associated with.
+ """
+
+ def __init__(self, ctx=None):
+ #: The :class:`~click.Context` for this parser. This might be
+ #: `None` for some advanced use cases.
+ self.ctx = ctx
+ #: This controls how the parser deals with interspersed arguments.
+ #: If this is set to `False`, the parser will stop on the first
+ #: non-option. Click uses this to implement nested subcommands
+ #: safely.
+ self.allow_interspersed_args = True
+ #: This tells the parser how to deal with unknown options. By
+ #: default it will error out (which is sensible), but there is a
+ #: second mode where it will ignore it and continue processing
+ #: after shifting all the unknown options into the resulting args.
+ self.ignore_unknown_options = False
+ if ctx is not None:
+ self.allow_interspersed_args = ctx.allow_interspersed_args
+ self.ignore_unknown_options = ctx.ignore_unknown_options
+ self._short_opt = {}
+ self._long_opt = {}
+ self._opt_prefixes = set(['-', '--'])
+ self._args = []
+
+ def add_option(self, opts, dest, action=None, nargs=1, const=None,
+ obj=None):
+ """Adds a new option named `dest` to the parser. The destination
+ is not inferred (unlike with optparse) and needs to be explicitly
+ provided. Action can be any of ``store``, ``store_const``,
+        ``append``, ``append_const`` or ``count``.
+
+ The `obj` can be used to identify the option in the order list
+ that is returned from the parser.
+ """
+ if obj is None:
+ obj = dest
+ opts = [normalize_opt(opt, self.ctx) for opt in opts]
+ option = Option(opts, dest, action=action, nargs=nargs,
+ const=const, obj=obj)
+ self._opt_prefixes.update(option.prefixes)
+ for opt in option._short_opts:
+ self._short_opt[opt] = option
+ for opt in option._long_opts:
+ self._long_opt[opt] = option
+
+ def add_argument(self, dest, nargs=1, obj=None):
+ """Adds a positional argument named `dest` to the parser.
+
+ The `obj` can be used to identify the option in the order list
+ that is returned from the parser.
+ """
+ if obj is None:
+ obj = dest
+ self._args.append(Argument(dest=dest, nargs=nargs, obj=obj))
+
+ def parse_args(self, args):
+ """Parses positional arguments and returns ``(values, args, order)``
+ for the parsed options and arguments as well as the leftover
+ arguments if there are any. The order is a list of objects as they
+ appear on the command line. If arguments appear multiple times they
+ will be memorized multiple times as well.
+ """
+ state = ParsingState(args)
+ try:
+ self._process_args_for_options(state)
+ self._process_args_for_args(state)
+ except UsageError:
+ if self.ctx is None or not self.ctx.resilient_parsing:
+ raise
+ return state.opts, state.largs, state.order
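+
+    # Illustrative sketch (not part of the library): the parser is normally
+    # driven by the high level Command classes, but used directly it looks
+    # roughly like this.
+    #
+    #     parser = OptionParser()
+    #     parser.add_option(['--name'], dest='name')
+    #     parser.add_argument(dest='src', nargs=1)
+    #     opts, largs, order = parser.parse_args(['--name', 'x', 'file.txt'])
+    #     # opts == {'name': 'x', 'src': 'file.txt'}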
+
+ def _process_args_for_args(self, state):
+ pargs, args = _unpack_args(state.largs + state.rargs,
+ [x.nargs for x in self._args])
+
+ for idx, arg in enumerate(self._args):
+ arg.process(pargs[idx], state)
+
+ state.largs = args
+ state.rargs = []
+
+ def _process_args_for_options(self, state):
+ while state.rargs:
+ arg = state.rargs.pop(0)
+ arglen = len(arg)
+ # Double dashes always handled explicitly regardless of what
+ # prefixes are valid.
+ if arg == '--':
+ return
+ elif arg[:1] in self._opt_prefixes and arglen > 1:
+ self._process_opts(arg, state)
+ elif self.allow_interspersed_args:
+ state.largs.append(arg)
+ else:
+ state.rargs.insert(0, arg)
+ return
+
+ # Say this is the original argument list:
+ # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
+ # ^
+ # (we are about to process arg(i)).
+ #
+ # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
+ # [arg0, ..., arg(i-1)] (any options and their arguments will have
+ # been removed from largs).
+ #
+ # The while loop will usually consume 1 or more arguments per pass.
+ # If it consumes 1 (eg. arg is an option that takes no arguments),
+ # then after _process_arg() is done the situation is:
+ #
+ # largs = subset of [arg0, ..., arg(i)]
+ # rargs = [arg(i+1), ..., arg(N-1)]
+ #
+ # If allow_interspersed_args is false, largs will always be
+ # *empty* -- still a subset of [arg0, ..., arg(i-1)], but
+ # not a very interesting subset!
+
+ def _match_long_opt(self, opt, explicit_value, state):
+ if opt not in self._long_opt:
+ possibilities = [word for word in self._long_opt
+ if word.startswith(opt)]
+ raise NoSuchOption(opt, possibilities=possibilities)
+
+ option = self._long_opt[opt]
+ if option.takes_value:
+ # At this point it's safe to modify rargs by injecting the
+ # explicit value, because no exception is raised in this
+ # branch. This means that the inserted value will be fully
+ # consumed.
+ if explicit_value is not None:
+ state.rargs.insert(0, explicit_value)
+
+ nargs = option.nargs
+ if len(state.rargs) < nargs:
+ _error_opt_args(nargs, opt)
+ elif nargs == 1:
+ value = state.rargs.pop(0)
+ else:
+ value = tuple(state.rargs[:nargs])
+ del state.rargs[:nargs]
+
+ elif explicit_value is not None:
+ raise BadOptionUsage('%s option does not take a value' % opt)
+
+ else:
+ value = None
+
+ option.process(value, state)
+
+ def _match_short_opt(self, arg, state):
+ stop = False
+ i = 1
+ prefix = arg[0]
+ unknown_options = []
+
+ for ch in arg[1:]:
+ opt = normalize_opt(prefix + ch, self.ctx)
+ option = self._short_opt.get(opt)
+ i += 1
+
+ if not option:
+ if self.ignore_unknown_options:
+ unknown_options.append(ch)
+ continue
+ raise NoSuchOption(opt)
+ if option.takes_value:
+ # Any characters left in arg? Pretend they're the
+ # next arg, and stop consuming characters of arg.
+ if i < len(arg):
+ state.rargs.insert(0, arg[i:])
+ stop = True
+
+ nargs = option.nargs
+ if len(state.rargs) < nargs:
+ _error_opt_args(nargs, opt)
+ elif nargs == 1:
+ value = state.rargs.pop(0)
+ else:
+ value = tuple(state.rargs[:nargs])
+ del state.rargs[:nargs]
+
+ else:
+ value = None
+
+ option.process(value, state)
+
+ if stop:
+ break
+
+        # If we got any unknown options, we recombine the string of the
+        # remaining options, re-attach the prefix, and append the result
+        # to largs.  This keeps basic short-option combination working
+        # while unknown options are still ignored.
+ if self.ignore_unknown_options and unknown_options:
+ state.largs.append(prefix + ''.join(unknown_options))
+
+ def _process_opts(self, arg, state):
+ explicit_value = None
+ # Long option handling happens in two parts. The first part is
+ # supporting explicitly attached values. In any case, we will try
+ # to long match the option first.
+ if '=' in arg:
+ long_opt, explicit_value = arg.split('=', 1)
+ else:
+ long_opt = arg
+ norm_long_opt = normalize_opt(long_opt, self.ctx)
+
+ # At this point we will match the (assumed) long option through
+ # the long option matching code. Note that this allows options
+ # like "-foo" to be matched as long options.
+ try:
+ self._match_long_opt(norm_long_opt, explicit_value, state)
+ except NoSuchOption:
+ # At this point the long option matching failed, and we need
+ # to try with short options. However there is a special rule
+ # which says, that if we have a two character options prefix
+ # (applies to "--foo" for instance), we do not dispatch to the
+ # short option code and will instead raise the no option
+ # error.
+ if arg[:2] not in self._opt_prefixes:
+ return self._match_short_opt(arg, state)
+ if not self.ignore_unknown_options:
+ raise
+ state.largs.append(arg)
diff --git a/appWSM/lib/python2.7/site-packages/click/termui.py b/appWSM/lib/python2.7/site-packages/click/termui.py
new file mode 100644
index 0000000000000000000000000000000000000000..d9fba52325f35bdf6f060d0843a15bdc95c53c5d
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/click/termui.py
@@ -0,0 +1,539 @@
+import os
+import sys
+import struct
+
+from ._compat import raw_input, text_type, string_types, \
+ isatty, strip_ansi, get_winterm_size, DEFAULT_COLUMNS, WIN
+from .utils import echo
+from .exceptions import Abort, UsageError
+from .types import convert_type
+from .globals import resolve_color_default
+
+
+# The prompt functions to use. The doc tools currently override these
+# functions to customize how they work.
+visible_prompt_func = raw_input
+
+_ansi_colors = ('black', 'red', 'green', 'yellow', 'blue', 'magenta',
+ 'cyan', 'white', 'reset')
+_ansi_reset_all = '\033[0m'
+
+
+def hidden_prompt_func(prompt):
+ import getpass
+ return getpass.getpass(prompt)
+
+
+def _build_prompt(text, suffix, show_default=False, default=None):
+ prompt = text
+ if default is not None and show_default:
+ prompt = '%s [%s]' % (prompt, default)
+ return prompt + suffix
+
+
+def prompt(text, default=None, hide_input=False,
+ confirmation_prompt=False, type=None,
+ value_proc=None, prompt_suffix=': ',
+ show_default=True, err=False):
+    """Prompts a user for input.  This is a convenience function that can
+    be used to ask a user for input interactively.
+
+    If the user aborts the input by sending an interrupt signal, this
+ function will catch it and raise a :exc:`Abort` exception.
+
+ .. versionadded:: 6.0
+ Added unicode support for cmd.exe on Windows.
+
+ .. versionadded:: 4.0
+ Added the `err` parameter.
+
+ :param text: the text to show for the prompt.
+ :param default: the default value to use if no input happens. If this
+ is not given it will prompt until it's aborted.
+ :param hide_input: if this is set to true then the input value will
+ be hidden.
+ :param confirmation_prompt: asks for confirmation for the value.
+ :param type: the type to use to check the value against.
+ :param value_proc: if this parameter is provided it's a function that
+ is invoked instead of the type conversion to
+ convert a value.
+ :param prompt_suffix: a suffix that should be added to the prompt.
+ :param show_default: shows or hides the default value in the prompt.
+ :param err: if set to true the file defaults to ``stderr`` instead of
+ ``stdout``, the same as with echo.
+ """
+ result = None
+
+ def prompt_func(text):
+ f = hide_input and hidden_prompt_func or visible_prompt_func
+ try:
+ # Write the prompt separately so that we get nice
+ # coloring through colorama on Windows
+ echo(text, nl=False, err=err)
+ return f('')
+ except (KeyboardInterrupt, EOFError):
+ # getpass doesn't print a newline if the user aborts input with ^C.
+ # Allegedly this behavior is inherited from getpass(3).
+ # A doc bug has been filed at https://bugs.python.org/issue24711
+ if hide_input:
+ echo(None, err=err)
+ raise Abort()
+
+ if value_proc is None:
+ value_proc = convert_type(type, default)
+
+ prompt = _build_prompt(text, prompt_suffix, show_default, default)
+
+ while 1:
+ while 1:
+ value = prompt_func(prompt)
+ if value:
+ break
+ # If a default is set and used, then the confirmation
+ # prompt is always skipped because that's the only thing
+ # that really makes sense.
+ elif default is not None:
+ return default
+ try:
+ result = value_proc(value)
+ except UsageError as e:
+ echo('Error: %s' % e.message, err=err)
+ continue
+ if not confirmation_prompt:
+ return result
+ while 1:
+ value2 = prompt_func('Repeat for confirmation: ')
+ if value2:
+ break
+ if value == value2:
+ return result
+ echo('Error: the two entered values do not match', err=err)
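+
+# Illustrative sketch (not part of the library): prompting with a typed
+# default; the entered text is converted to an integer.
+#
+#     port = click.prompt('Port', default=8000, type=int)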
+
+
+def confirm(text, default=False, abort=False, prompt_suffix=': ',
+ show_default=True, err=False):
+ """Prompts for confirmation (yes/no question).
+
+    If the user aborts the input by sending an interrupt signal, this
+ function will catch it and raise a :exc:`Abort` exception.
+
+ .. versionadded:: 4.0
+ Added the `err` parameter.
+
+ :param text: the question to ask.
+ :param default: the default for the prompt.
+    :param abort: if this is set to `True` a negative answer aborts by
+                  raising :exc:`Abort`.
+ :param prompt_suffix: a suffix that should be added to the prompt.
+ :param show_default: shows or hides the default value in the prompt.
+ :param err: if set to true the file defaults to ``stderr`` instead of
+ ``stdout``, the same as with echo.
+ """
+ prompt = _build_prompt(text, prompt_suffix, show_default,
+ default and 'Y/n' or 'y/N')
+ while 1:
+ try:
+ # Write the prompt separately so that we get nice
+ # coloring through colorama on Windows
+ echo(prompt, nl=False, err=err)
+ value = visible_prompt_func('').lower().strip()
+ except (KeyboardInterrupt, EOFError):
+ raise Abort()
+ if value in ('y', 'yes'):
+ rv = True
+ elif value in ('n', 'no'):
+ rv = False
+ elif value == '':
+ rv = default
+ else:
+ echo('Error: invalid input', err=err)
+ continue
+ break
+ if abort and not rv:
+ raise Abort()
+ return rv
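+
+# Illustrative sketch (not part of the library): asking for confirmation
+# and aborting the program on a negative answer.
+#
+#     click.confirm('Drop the database?', abort=True)
+#     drop_database()  # hypothetical helper, only reached on "yes"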
+
+
+def get_terminal_size():
+ """Returns the current size of the terminal as tuple in the form
+ ``(width, height)`` in columns and rows.
+ """
+ # If shutil has get_terminal_size() (Python 3.3 and later) use that
+ if sys.version_info >= (3, 3):
+ import shutil
+ shutil_get_terminal_size = getattr(shutil, 'get_terminal_size', None)
+ if shutil_get_terminal_size:
+ sz = shutil_get_terminal_size()
+ return sz.columns, sz.lines
+
+ if get_winterm_size is not None:
+ return get_winterm_size()
+
+ def ioctl_gwinsz(fd):
+ try:
+ import fcntl
+ import termios
+ cr = struct.unpack(
+ 'hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
+ except Exception:
+ return
+ return cr
+
+ cr = ioctl_gwinsz(0) or ioctl_gwinsz(1) or ioctl_gwinsz(2)
+ if not cr:
+ try:
+ fd = os.open(os.ctermid(), os.O_RDONLY)
+ try:
+ cr = ioctl_gwinsz(fd)
+ finally:
+ os.close(fd)
+ except Exception:
+ pass
+ if not cr or not cr[0] or not cr[1]:
+ cr = (os.environ.get('LINES', 25),
+ os.environ.get('COLUMNS', DEFAULT_COLUMNS))
+ return int(cr[1]), int(cr[0])
+
+
+def echo_via_pager(text, color=None):
+ """This function takes a text and shows it via an environment specific
+ pager on stdout.
+
+ .. versionchanged:: 3.0
+ Added the `color` flag.
+
+ :param text: the text to page.
+ :param color: controls if the pager supports ANSI colors or not. The
+ default is autodetection.
+ """
+ color = resolve_color_default(color)
+ if not isinstance(text, string_types):
+ text = text_type(text)
+ from ._termui_impl import pager
+ return pager(text + '\n', color)
+
+
+def progressbar(iterable=None, length=None, label=None, show_eta=True,
+ show_percent=None, show_pos=False,
+ item_show_func=None, fill_char='#', empty_char='-',
+ bar_template='%(label)s [%(bar)s] %(info)s',
+ info_sep=' ', width=36, file=None, color=None):
+ """This function creates an iterable context manager that can be used
+ to iterate over something while showing a progress bar. It will
+ either iterate over the `iterable` or `length` items (that are counted
+ up). While iteration happens, this function will print a rendered
+ progress bar to the given `file` (defaults to stdout) and will attempt
+ to calculate remaining time and more. By default, this progress bar
+ will not be rendered if the file is not a terminal.
+
+ The context manager creates the progress bar. When the context
+ manager is entered the progress bar is already displayed. With every
+ iteration over the progress bar, the iterable passed to the bar is
+ advanced and the bar is updated. When the context manager exits,
+ a newline is printed and the progress bar is finalized on screen.
+
+    No other printing must happen while the bar is active, or the
+    progress bar will be unintentionally destroyed.
+
+ Example usage::
+
+ with progressbar(items) as bar:
+ for item in bar:
+ do_something_with(item)
+
+ Alternatively, if no iterable is specified, one can manually update the
+ progress bar through the `update()` method instead of directly
+ iterating over the progress bar. The update method accepts the number
+ of steps to increment the bar with::
+
+ with progressbar(length=chunks.total_bytes) as bar:
+ for chunk in chunks:
+ process_chunk(chunk)
+ bar.update(chunks.bytes)
+
+ .. versionadded:: 2.0
+
+ .. versionadded:: 4.0
+       Added the `color` parameter.  Added an `update` method to the
+ progressbar object.
+
+ :param iterable: an iterable to iterate over. If not provided the length
+ is required.
+ :param length: the number of items to iterate over. By default the
+ progressbar will attempt to ask the iterator about its
+ length, which might or might not work. If an iterable is
+ also provided this parameter can be used to override the
+ length. If an iterable is not provided the progress bar
+ will iterate over a range of that length.
+ :param label: the label to show next to the progress bar.
+ :param show_eta: enables or disables the estimated time display. This is
+ automatically disabled if the length cannot be
+ determined.
+ :param show_percent: enables or disables the percentage display. The
+ default is `True` if the iterable has a length or
+ `False` if not.
+ :param show_pos: enables or disables the absolute position display. The
+ default is `False`.
+ :param item_show_func: a function called with the current item which
+ can return a string to show the current item
+ next to the progress bar. Note that the current
+ item can be `None`!
+ :param fill_char: the character to use to show the filled part of the
+ progress bar.
+ :param empty_char: the character to use to show the non-filled part of
+ the progress bar.
+ :param bar_template: the format string to use as template for the bar.
+ The parameters in it are ``label`` for the label,
+ ``bar`` for the progress bar and ``info`` for the
+ info section.
+ :param info_sep: the separator between multiple info items (eta etc.)
+ :param width: the width of the progress bar in characters, 0 means full
+ terminal width
+ :param file: the file to write to. If this is not a terminal then
+ only the label is printed.
+ :param color: controls if the terminal supports ANSI colors or not. The
+ default is autodetection. This is only needed if ANSI
+ codes are included anywhere in the progress bar output
+ which is not the case by default.
+ """
+ from ._termui_impl import ProgressBar
+ color = resolve_color_default(color)
+ return ProgressBar(iterable=iterable, length=length, show_eta=show_eta,
+ show_percent=show_percent, show_pos=show_pos,
+ item_show_func=item_show_func, fill_char=fill_char,
+ empty_char=empty_char, bar_template=bar_template,
+ info_sep=info_sep, file=file, label=label,
+ width=width, color=color)
+
+
+def clear():
+ """Clears the terminal screen. This will have the effect of clearing
+ the whole visible space of the terminal and moving the cursor to the
+ top left. This does not do anything if not connected to a terminal.
+
+ .. versionadded:: 2.0
+ """
+ if not isatty(sys.stdout):
+ return
+ # If we're on Windows and we don't have colorama available, then we
+ # clear the screen by shelling out. Otherwise we can use an escape
+ # sequence.
+ if WIN:
+ os.system('cls')
+ else:
+ sys.stdout.write('\033[2J\033[1;1H')
+
+
+def style(text, fg=None, bg=None, bold=None, dim=None, underline=None,
+ blink=None, reverse=None, reset=True):
+ """Styles a text with ANSI styles and returns the new string. By
+ default the styling is self contained which means that at the end
+ of the string a reset code is issued. This can be prevented by
+ passing ``reset=False``.
+
+ Examples::
+
+ click.echo(click.style('Hello World!', fg='green'))
+ click.echo(click.style('ATTENTION!', blink=True))
+ click.echo(click.style('Some things', reverse=True, fg='cyan'))
+
+ Supported color names:
+
+ * ``black`` (might be a gray)
+ * ``red``
+ * ``green``
+ * ``yellow`` (might be an orange)
+ * ``blue``
+ * ``magenta``
+ * ``cyan``
+ * ``white`` (might be light gray)
+ * ``reset`` (reset the color code only)
+
+ .. versionadded:: 2.0
+
+ :param text: the string to style with ansi codes.
+ :param fg: if provided this will become the foreground color.
+ :param bg: if provided this will become the background color.
+ :param bold: if provided this will enable or disable bold mode.
+ :param dim: if provided this will enable or disable dim mode. This is
+ badly supported.
+ :param underline: if provided this will enable or disable underline.
+ :param blink: if provided this will enable or disable blinking.
+ :param reverse: if provided this will enable or disable inverse
+ rendering (foreground becomes background and the
+ other way round).
+ :param reset: by default a reset-all code is added at the end of the
+ string which means that styles do not carry over. This
+ can be disabled to compose styles.
+ """
+ bits = []
+ if fg:
+ try:
+ bits.append('\033[%dm' % (_ansi_colors.index(fg) + 30))
+ except ValueError:
+ raise TypeError('Unknown color %r' % fg)
+ if bg:
+ try:
+ bits.append('\033[%dm' % (_ansi_colors.index(bg) + 40))
+ except ValueError:
+ raise TypeError('Unknown color %r' % bg)
+ if bold is not None:
+ bits.append('\033[%dm' % (1 if bold else 22))
+ if dim is not None:
+ bits.append('\033[%dm' % (2 if dim else 22))
+ if underline is not None:
+ bits.append('\033[%dm' % (4 if underline else 24))
+ if blink is not None:
+ bits.append('\033[%dm' % (5 if blink else 25))
+ if reverse is not None:
+ bits.append('\033[%dm' % (7 if reverse else 27))
+ bits.append(text)
+ if reset:
+ bits.append(_ansi_reset_all)
+ return ''.join(bits)
+
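+# Usage sketch (illustrative, using only the API documented above): compose
+# styles by suppressing the reset code on the first fragment:
+#
+#     click.echo(click.style('WARNING: ', fg='yellow', reset=False) +
+#                click.style('disk almost full', bold=True))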
+
+def unstyle(text):
+ """Removes ANSI styling information from a string. Usually it's not
+ necessary to use this function as Click's echo function will
+ automatically remove styling if necessary.
+
+ .. versionadded:: 2.0
+
+ :param text: the text to remove style information from.
+ """
+ return strip_ansi(text)
+
+
+def secho(text, file=None, nl=True, err=False, color=None, **styles):
+ """This function combines :func:`echo` and :func:`style` into one
+ call. As such the following two calls are the same::
+
+ click.secho('Hello World!', fg='green')
+ click.echo(click.style('Hello World!', fg='green'))
+
+ All keyword arguments are forwarded to the underlying functions
+ depending on which one they go with.
+
+ .. versionadded:: 2.0
+ """
+ return echo(style(text, **styles), file=file, nl=nl, err=err, color=color)
+
+
+def edit(text=None, editor=None, env=None, require_save=True,
+ extension='.txt', filename=None):
+ r"""Edits the given text in the defined editor. If an editor is given
+ (should be the full path to the executable but the regular operating
+ system search path is used for finding the executable) it overrides
+ the detected editor. Optionally, some environment variables can be
+ used. If the editor is closed without changes, `None` is returned. In
+ case a file is edited directly the return value is always `None` and
+ `require_save` and `extension` are ignored.
+
+ If the editor cannot be opened a :exc:`UsageError` is raised.
+
+ Note for Windows: to simplify cross-platform usage, the newlines are
+ automatically converted from POSIX to Windows and vice versa. As such,
+ the message here will have ``\n`` as newline markers.
+
+ :param text: the text to edit.
+ :param editor: optionally the editor to use. Defaults to automatic
+ detection.
+ :param env: environment variables to forward to the editor.
+ :param require_save: if this is true, then not saving in the editor
+ will make the return value become `None`.
+ :param extension: the extension to tell the editor about. This defaults
+ to `.txt` but changing this might change syntax
+ highlighting.
+ :param filename: if provided it will edit this file instead of the
+ provided text contents. It will not use a temporary
+ file as an indirection in that case.
+ """
+ from ._termui_impl import Editor
+ editor = Editor(editor=editor, env=env, require_save=require_save,
+ extension=extension)
+ if filename is None:
+ return editor.edit(text)
+ editor.edit_file(filename)
+
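+# Usage sketch (illustrative): prompt for a commit-style message and handle
+# the "closed without saving" case described above:
+#
+#     message = click.edit('\n\n# Describe your change above this line.')
+#     if message is None:
+#         click.echo('Aborted: editor closed without saving.')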
+
+def launch(url, wait=False, locate=False):
+ """This function launches the given URL (or filename) in the default
+ viewer application for this file type. If this is an executable, it
+ might launch the executable in a new session. The return value is
+ the exit code of the launched application. Usually, ``0`` indicates
+ success.
+
+ Examples::
+
+ click.launch('http://click.pocoo.org/')
+ click.launch('/my/downloaded/file', locate=True)
+
+ .. versionadded:: 2.0
+
+ :param url: URL or filename of the thing to launch.
+ :param wait: waits for the program to stop.
+ :param locate: if this is set to `True` then instead of launching the
+ application associated with the URL it will attempt to
+ launch a file manager with the file located. This
+ might have weird effects if the URL does not point to
+ the filesystem.
+ """
+ from ._termui_impl import open_url
+ return open_url(url, wait=wait, locate=locate)
+
+
+# If this is provided, getchar() calls into this instead. This is used
+# for unittesting purposes.
+_getchar = None
+
+
+def getchar(echo=False):
+ """Fetches a single character from the terminal and returns it. This
+ will always return a unicode character and under certain rare
+ circumstances this might return more than one character. More than one
+ character is returned when, for whatever reason, multiple characters
+ end up in the terminal buffer or standard input is not actually a
+ terminal.
+
+ Note that this will always read from the terminal, even if something
+ is piped into the standard input.
+
+ .. versionadded:: 2.0
+
+ :param echo: if set to `True`, the character read will also show up on
+ the terminal. The default is to not show it.
+ """
+ f = _getchar
+ if f is None:
+ from ._termui_impl import getchar as f
+ return f(echo)
+
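+# Usage sketch (illustrative): a minimal keypress confirmation built on
+# getchar():
+#
+#     click.echo('Continue? [y/n] ', nl=False)
+#     c = click.getchar()
+#     click.echo()
+#     if c.lower() != 'y':
+#         raise SystemExit(1)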
+
+def pause(info='Press any key to continue ...', err=False):
+ """This command stops execution and waits for the user to press any
+ key to continue. This is similar to the Windows batch "pause"
+ command. If the program is not run through a terminal, this command
+ will instead do nothing.
+
+ .. versionadded:: 2.0
+
+ .. versionadded:: 4.0
+ Added the `err` parameter.
+
+ :param info: the info string to print before pausing.
+ :param err: if set to `True` the message goes to ``stderr`` instead of
+ ``stdout``, the same as with echo.
+ """
+ if not isatty(sys.stdin) or not isatty(sys.stdout):
+ return
+ try:
+ if info:
+ echo(info, nl=False, err=err)
+ try:
+ getchar()
+ except (KeyboardInterrupt, EOFError):
+ pass
+ finally:
+ if info:
+ echo(err=err)
diff --git a/appWSM/lib/python2.7/site-packages/click/testing.py b/appWSM/lib/python2.7/site-packages/click/testing.py
new file mode 100644
index 0000000000000000000000000000000000000000..4416c774136dc9b9a4edd4adb02699137768b7fb
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/click/testing.py
@@ -0,0 +1,322 @@
+import os
+import sys
+import shutil
+import tempfile
+import contextlib
+
+from ._compat import iteritems, PY2
+
+
+# If someone wants to vendor click, we want to ensure the
+# correct package is discovered. Ideally we could use a
+# relative import here but unfortunately Python does not
+# support that.
+clickpkg = sys.modules[__name__.rsplit('.', 1)[0]]
+
+
+if PY2:
+ from cStringIO import StringIO
+else:
+ import io
+ from ._compat import _find_binary_reader
+
+
+class EchoingStdin(object):
+
+ def __init__(self, input, output):
+ self._input = input
+ self._output = output
+
+ def __getattr__(self, x):
+ return getattr(self._input, x)
+
+ def _echo(self, rv):
+ self._output.write(rv)
+ return rv
+
+ def read(self, n=-1):
+ return self._echo(self._input.read(n))
+
+ def readline(self, n=-1):
+ return self._echo(self._input.readline(n))
+
+ def readlines(self):
+ return [self._echo(x) for x in self._input.readlines()]
+
+ def __iter__(self):
+ return iter(self._echo(x) for x in self._input)
+
+ def __repr__(self):
+ return repr(self._input)
+
+
+def make_input_stream(input, charset):
+ # Is already an input stream.
+ if hasattr(input, 'read'):
+ if PY2:
+ return input
+ rv = _find_binary_reader(input)
+ if rv is not None:
+ return rv
+ raise TypeError('Could not find binary reader for input stream.')
+
+ if input is None:
+ input = b''
+ elif not isinstance(input, bytes):
+ input = input.encode(charset)
+ if PY2:
+ return StringIO(input)
+ return io.BytesIO(input)
+
+
+class Result(object):
+ """Holds the captured result of an invoked CLI script."""
+
+ def __init__(self, runner, output_bytes, exit_code, exception,
+ exc_info=None):
+ #: The runner that created the result
+ self.runner = runner
+ #: The output as bytes.
+ self.output_bytes = output_bytes
+ #: The exit code as integer.
+ self.exit_code = exit_code
+ #: The exception that happened if one did.
+ self.exception = exception
+ #: The traceback
+ self.exc_info = exc_info
+
+ @property
+ def output(self):
+ """The output as unicode string."""
+ return self.output_bytes.decode(self.runner.charset, 'replace') \
+ .replace('\r\n', '\n')
+
+ def __repr__(self):
+ return '<Result %s>' % (
+ self.exception and repr(self.exception) or 'okay',
+ )
+
+
+class CliRunner(object):
+ """The CLI runner provides functionality to invoke a Click command line
+ script for unittesting purposes in an isolated environment. This only
+ works in single-threaded systems without any concurrency as it changes the
+ global interpreter state.
+
+ :param charset: the character set for the input and output data. This is
+ UTF-8 by default and should not be changed currently as
+ the reporting to Click only works properly in Python 2.
+ :param env: a dictionary with environment variables for overriding.
+ :param echo_stdin: if this is set to `True`, then reading from stdin writes
+ to stdout. This is useful for showing examples in
+ some circumstances. Note that regular prompts
+ will automatically echo the input.
+ """
+
+ def __init__(self, charset=None, env=None, echo_stdin=False):
+ if charset is None:
+ charset = 'utf-8'
+ self.charset = charset
+ self.env = env or {}
+ self.echo_stdin = echo_stdin
+
+ def get_default_prog_name(self, cli):
+ """Given a command object it will return the default program name
+ for it. The default is the `name` attribute or ``"root"`` if not
+ set.
+ """
+ return cli.name or 'root'
+
+ def make_env(self, overrides=None):
+ """Returns the environment overrides for invoking a script."""
+ rv = dict(self.env)
+ if overrides:
+ rv.update(overrides)
+ return rv
+
+ @contextlib.contextmanager
+ def isolation(self, input=None, env=None, color=False):
+ """A context manager that sets up the isolation for invoking of a
+ command line tool. This sets up stdin with the given input data
+ and `os.environ` with the overrides from the given dictionary.
+ This also rebinds some internals in Click to be mocked (like the
+ prompt functionality).
+
+ This is automatically done in the :meth:`invoke` method.
+
+ .. versionadded:: 4.0
+ The ``color`` parameter was added.
+
+ :param input: the input stream to put into sys.stdin.
+ :param env: the environment overrides as dictionary.
+ :param color: whether the output should contain color codes. The
+ application can still override this explicitly.
+ """
+ input = make_input_stream(input, self.charset)
+
+ old_stdin = sys.stdin
+ old_stdout = sys.stdout
+ old_stderr = sys.stderr
+ old_forced_width = clickpkg.formatting.FORCED_WIDTH
+ clickpkg.formatting.FORCED_WIDTH = 80
+
+ env = self.make_env(env)
+
+ if PY2:
+ sys.stdout = sys.stderr = bytes_output = StringIO()
+ if self.echo_stdin:
+ input = EchoingStdin(input, bytes_output)
+ else:
+ bytes_output = io.BytesIO()
+ if self.echo_stdin:
+ input = EchoingStdin(input, bytes_output)
+ input = io.TextIOWrapper(input, encoding=self.charset)
+ sys.stdout = sys.stderr = io.TextIOWrapper(
+ bytes_output, encoding=self.charset)
+
+ sys.stdin = input
+
+ def visible_input(prompt=None):
+ sys.stdout.write(prompt or '')
+ val = input.readline().rstrip('\r\n')
+ sys.stdout.write(val + '\n')
+ sys.stdout.flush()
+ return val
+
+ def hidden_input(prompt=None):
+ sys.stdout.write((prompt or '') + '\n')
+ sys.stdout.flush()
+ return input.readline().rstrip('\r\n')
+
+ def _getchar(echo):
+ char = sys.stdin.read(1)
+ if echo:
+ sys.stdout.write(char)
+ sys.stdout.flush()
+ return char
+
+ default_color = color
+ def should_strip_ansi(stream=None, color=None):
+ if color is None:
+ return not default_color
+ return not color
+
+ old_visible_prompt_func = clickpkg.termui.visible_prompt_func
+ old_hidden_prompt_func = clickpkg.termui.hidden_prompt_func
+ old__getchar_func = clickpkg.termui._getchar
+ old_should_strip_ansi = clickpkg.utils.should_strip_ansi
+ clickpkg.termui.visible_prompt_func = visible_input
+ clickpkg.termui.hidden_prompt_func = hidden_input
+ clickpkg.termui._getchar = _getchar
+ clickpkg.utils.should_strip_ansi = should_strip_ansi
+
+ old_env = {}
+ try:
+ for key, value in iteritems(env):
+ old_env[key] = os.environ.get(key)
+ if value is None:
+ try:
+ del os.environ[key]
+ except Exception:
+ pass
+ else:
+ os.environ[key] = value
+ yield bytes_output
+ finally:
+ for key, value in iteritems(old_env):
+ if value is None:
+ try:
+ del os.environ[key]
+ except Exception:
+ pass
+ else:
+ os.environ[key] = value
+ sys.stdout = old_stdout
+ sys.stderr = old_stderr
+ sys.stdin = old_stdin
+ clickpkg.termui.visible_prompt_func = old_visible_prompt_func
+ clickpkg.termui.hidden_prompt_func = old_hidden_prompt_func
+ clickpkg.termui._getchar = old__getchar_func
+ clickpkg.utils.should_strip_ansi = old_should_strip_ansi
+ clickpkg.formatting.FORCED_WIDTH = old_forced_width
+
+ def invoke(self, cli, args=None, input=None, env=None,
+ catch_exceptions=True, color=False, **extra):
+ """Invokes a command in an isolated environment. The arguments are
+ forwarded directly to the command line script, the `extra` keyword
+ arguments are passed to the :meth:`~clickpkg.Command.main` function of
+ the command.
+
+ This returns a :class:`Result` object.
+
+ .. versionadded:: 3.0
+ The ``catch_exceptions`` parameter was added.
+
+ .. versionchanged:: 3.0
+ The result object now has an `exc_info` attribute with the
+ traceback if available.
+
+ .. versionadded:: 4.0
+ The ``color`` parameter was added.
+
+ :param cli: the command to invoke
+ :param args: the arguments to invoke
+ :param input: the input data for `sys.stdin`.
+ :param env: the environment overrides.
+ :param catch_exceptions: Whether to catch any other exceptions than
+ ``SystemExit``.
+ :param extra: the keyword arguments to pass to :meth:`main`.
+ :param color: whether the output should contain color codes. The
+ application can still override this explicitly.
+ """
+ exc_info = None
+ with self.isolation(input=input, env=env, color=color) as out:
+ exception = None
+ exit_code = 0
+
+ try:
+ cli.main(args=args or (),
+ prog_name=self.get_default_prog_name(cli), **extra)
+ except SystemExit as e:
+ if e.code != 0:
+ exception = e
+
+ exc_info = sys.exc_info()
+
+ exit_code = e.code
+ if not isinstance(exit_code, int):
+ sys.stdout.write(str(exit_code))
+ sys.stdout.write('\n')
+ exit_code = 1
+ except Exception as e:
+ if not catch_exceptions:
+ raise
+ exception = e
+ exit_code = -1
+ exc_info = sys.exc_info()
+ finally:
+ sys.stdout.flush()
+ output = out.getvalue()
+
+ return Result(runner=self,
+ output_bytes=output,
+ exit_code=exit_code,
+ exception=exception,
+ exc_info=exc_info)
+
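+ # Usage sketch for :meth:`invoke` (illustrative; ``hello`` is a
+ # hypothetical command, not defined in this module):
+ #
+ #     runner = CliRunner()
+ #     result = runner.invoke(hello, ['--count', '2'], input='Alice\n')
+ #     assert result.exit_code == 0
+ #     assert 'Hello' in result.output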
+ @contextlib.contextmanager
+ def isolated_filesystem(self):
+ """A context manager that creates a temporary folder and changes
+ the current working directory to it for isolated filesystem tests.
+ """
+ cwd = os.getcwd()
+ t = tempfile.mkdtemp()
+ os.chdir(t)
+ try:
+ yield t
+ finally:
+ os.chdir(cwd)
+ try:
+ shutil.rmtree(t)
+ except (OSError, IOError):
+ pass
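+# Usage sketch for isolated_filesystem (illustrative; ``cat`` is a
+# hypothetical command taking a file argument):
+#
+#     runner = CliRunner()
+#     with runner.isolated_filesystem():
+#         with open('hello.txt', 'w') as f:
+#             f.write('Hello World!')
+#         result = runner.invoke(cat, ['hello.txt'])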
diff --git a/appWSM/lib/python2.7/site-packages/click/types.py b/appWSM/lib/python2.7/site-packages/click/types.py
new file mode 100644
index 0000000000000000000000000000000000000000..36390026dcd1784f6d85f854730faa31ebf95b55
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/click/types.py
@@ -0,0 +1,550 @@
+import os
+import stat
+
+from ._compat import open_stream, text_type, filename_to_ui, \
+ get_filesystem_encoding, get_streerror, _get_argv_encoding, PY2
+from .exceptions import BadParameter
+from .utils import safecall, LazyFile
+
+
+class ParamType(object):
+ """Helper for converting values through types. The following is
+ necessary for a valid type:
+
+ * it needs a name
+ * it needs to pass through None unchanged
+ * it needs to convert from a string
+ * it needs to convert its result type through unchanged
+ (eg: needs to be idempotent)
+ * it needs to be able to deal with param and context being `None`.
+ This can be the case when the object is used with prompt
+ inputs.
+ """
+ is_composite = False
+
+ #: the descriptive name of this type
+ name = None
+
+ #: if a list of this type is expected and the value is pulled from a
+ #: string environment variable, this is what splits it up. `None`
+ #: means any whitespace. For all parameters the general rule is that
+ #: whitespace splits them up. The exceptions are paths and files, which
+ #: are split by ``os.path.pathsep`` by default (":" on Unix and ";" on
+ #: Windows).
+ envvar_list_splitter = None
+
+ def __call__(self, value, param=None, ctx=None):
+ if value is not None:
+ return self.convert(value, param, ctx)
+
+ def get_metavar(self, param):
+ """Returns the metavar default for this param if it provides one."""
+
+ def get_missing_message(self, param):
+ """Optionally might return extra information about a missing
+ parameter.
+
+ .. versionadded:: 2.0
+ """
+
+ def convert(self, value, param, ctx):
+ """Converts the value. This is not invoked for values that are
+ `None` (the missing value).
+ """
+ return value
+
+ def split_envvar_value(self, rv):
+ """Given a value from an environment variable this splits it up
+ into small chunks depending on the defined envvar list splitter.
+
+ If the splitter is set to `None`, which means that whitespace splits,
+ then leading and trailing whitespace is ignored. Otherwise, leading
+ and trailing splitters usually lead to empty items being included.
+ """
+ return (rv or '').split(self.envvar_list_splitter)
+
+ def fail(self, message, param=None, ctx=None):
+ """Helper method to fail with an invalid value message."""
+ raise BadParameter(message, ctx=ctx, param=param)
+
+
+class CompositeParamType(ParamType):
+ is_composite = True
+
+ @property
+ def arity(self):
+ raise NotImplementedError()
+
+
+class FuncParamType(ParamType):
+
+ def __init__(self, func):
+ self.name = func.__name__
+ self.func = func
+
+ def convert(self, value, param, ctx):
+ try:
+ return self.func(value)
+ except ValueError:
+ try:
+ value = text_type(value)
+ except UnicodeError:
+ value = str(value).decode('utf-8', 'replace')
+ self.fail(value, param, ctx)
+
+
+class UnprocessedParamType(ParamType):
+ name = 'text'
+
+ def convert(self, value, param, ctx):
+ return value
+
+ def __repr__(self):
+ return 'UNPROCESSED'
+
+
+class StringParamType(ParamType):
+ name = 'text'
+
+ def convert(self, value, param, ctx):
+ if isinstance(value, bytes):
+ enc = _get_argv_encoding()
+ try:
+ value = value.decode(enc)
+ except UnicodeError:
+ fs_enc = get_filesystem_encoding()
+ if fs_enc != enc:
+ try:
+ value = value.decode(fs_enc)
+ except UnicodeError:
+ value = value.decode('utf-8', 'replace')
+ return value
+ return value
+
+ def __repr__(self):
+ return 'STRING'
+
+
+class Choice(ParamType):
+ """The choice type allows a value to be checked against a fixed set of
+ supported values. All of these values have to be strings.
+
+ See :ref:`choice-opts` for an example.
+ """
+ name = 'choice'
+
+ def __init__(self, choices):
+ self.choices = choices
+
+ def get_metavar(self, param):
+ return '[%s]' % '|'.join(self.choices)
+
+ def get_missing_message(self, param):
+ return 'Choose from %s.' % ', '.join(self.choices)
+
+ def convert(self, value, param, ctx):
+ # Exact match
+ if value in self.choices:
+ return value
+
+ # Match through normalization
+ if ctx is not None and \
+ ctx.token_normalize_func is not None:
+ value = ctx.token_normalize_func(value)
+ for choice in self.choices:
+ if ctx.token_normalize_func(choice) == value:
+ return choice
+
+ self.fail('invalid choice: %s. (choose from %s)' %
+ (value, ', '.join(self.choices)), param, ctx)
+
+ def __repr__(self):
+ return 'Choice(%r)' % list(self.choices)
+
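+# Usage sketch (illustrative): restricting an option to a fixed set of values
+# with the Choice type:
+#
+#     @click.option('--hash-type', type=click.Choice(['md5', 'sha1']))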
+
+class IntParamType(ParamType):
+ name = 'integer'
+
+ def convert(self, value, param, ctx):
+ try:
+ return int(value)
+ except (ValueError, UnicodeError):
+ self.fail('%s is not a valid integer' % value, param, ctx)
+
+ def __repr__(self):
+ return 'INT'
+
+
+class IntRange(IntParamType):
+ """A parameter that works similar to :data:`click.INT` but restricts
+ the value to fit into a range. The default behavior is to fail if the
+ value falls outside the range, but it can also be silently clamped
+ between the two edges.
+
+ See :ref:`ranges` for an example.
+ """
+ name = 'integer range'
+
+ def __init__(self, min=None, max=None, clamp=False):
+ self.min = min
+ self.max = max
+ self.clamp = clamp
+
+ def convert(self, value, param, ctx):
+ rv = IntParamType.convert(self, value, param, ctx)
+ if self.clamp:
+ if self.min is not None and rv < self.min:
+ return self.min
+ if self.max is not None and rv > self.max:
+ return self.max
+ if self.min is not None and rv < self.min or \
+ self.max is not None and rv > self.max:
+ if self.min is None:
+ self.fail('%s is bigger than the maximum valid value '
+ '%s.' % (rv, self.max), param, ctx)
+ elif self.max is None:
+ self.fail('%s is smaller than the minimum valid value '
+ '%s.' % (rv, self.min), param, ctx)
+ else:
+ self.fail('%s is not in the valid range of %s to %s.'
+ % (rv, self.min, self.max), param, ctx)
+ return rv
+
+ def __repr__(self):
+ return 'IntRange(%r, %r)' % (self.min, self.max)
+
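+# Usage sketch (illustrative): clamping out-of-range values with IntRange
+# instead of failing, as described above; --count=99 becomes 10 and
+# --count=-1 becomes 0:
+#
+#     @click.option('--count', type=click.IntRange(0, 10, clamp=True))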
+
+class BoolParamType(ParamType):
+ name = 'boolean'
+
+ def convert(self, value, param, ctx):
+ if isinstance(value, bool):
+ return bool(value)
+ value = value.lower()
+ if value in ('true', '1', 'yes', 'y'):
+ return True
+ elif value in ('false', '0', 'no', 'n'):
+ return False
+ self.fail('%s is not a valid boolean' % value, param, ctx)
+
+ def __repr__(self):
+ return 'BOOL'
+
+
+class FloatParamType(ParamType):
+ name = 'float'
+
+ def convert(self, value, param, ctx):
+ try:
+ return float(value)
+ except (UnicodeError, ValueError):
+ self.fail('%s is not a valid floating point value' %
+ value, param, ctx)
+
+ def __repr__(self):
+ return 'FLOAT'
+
+
+class UUIDParameterType(ParamType):
+ name = 'uuid'
+
+ def convert(self, value, param, ctx):
+ import uuid
+ try:
+ if PY2 and isinstance(value, text_type):
+ value = value.encode('ascii')
+ return uuid.UUID(value)
+ except (UnicodeError, ValueError):
+ self.fail('%s is not a valid UUID value' % value, param, ctx)
+
+ def __repr__(self):
+ return 'UUID'
+
+
+class File(ParamType):
+ """Declares a parameter to be a file for reading or writing. The file
+ is automatically closed once the context tears down (after the command
+ finished working).
+
+ Files can be opened for reading or writing. The special value ``-``
+ indicates stdin or stdout depending on the mode.
+
+ By default, the file is opened for reading text data, but it can also be
+ opened in binary mode or for writing. The encoding parameter can be used
+ to force a specific encoding.
+
+ The `lazy` flag controls if the file should be opened immediately or
+ upon first IO. The default is to be non lazy for standard input and
+ output streams as well as files opened for reading, lazy otherwise.
+
+ Starting with Click 2.0, files can also be opened atomically in which
+ case all writes go into a separate file in the same folder and upon
+ completion the file will be moved over to the original location. This
+ is useful if a file regularly read by other users is modified.
+
+ See :ref:`file-args` for more information.
+ """
+ name = 'filename'
+ envvar_list_splitter = os.path.pathsep
+
+ def __init__(self, mode='r', encoding=None, errors='strict', lazy=None,
+ atomic=False):
+ self.mode = mode
+ self.encoding = encoding
+ self.errors = errors
+ self.lazy = lazy
+ self.atomic = atomic
+
+ def resolve_lazy_flag(self, value):
+ if self.lazy is not None:
+ return self.lazy
+ if value == '-':
+ return False
+ elif 'w' in self.mode:
+ return True
+ return False
+
+ def convert(self, value, param, ctx):
+ try:
+ if hasattr(value, 'read') or hasattr(value, 'write'):
+ return value
+
+ lazy = self.resolve_lazy_flag(value)
+
+ if lazy:
+ f = LazyFile(value, self.mode, self.encoding, self.errors,
+ atomic=self.atomic)
+ if ctx is not None:
+ ctx.call_on_close(f.close_intelligently)
+ return f
+
+ f, should_close = open_stream(value, self.mode,
+ self.encoding, self.errors,
+ atomic=self.atomic)
+ # If a context is provided, we automatically close the file
+ # at the end of the context execution (or flush out). If a
+ # context does not exist, it's the caller's responsibility to
+ # properly close the file. This for instance happens when the
+ # type is used with prompts.
+ if ctx is not None:
+ if should_close:
+ ctx.call_on_close(safecall(f.close))
+ else:
+ ctx.call_on_close(safecall(f.flush))
+ return f
+ except (IOError, OSError) as e:
+ self.fail('Could not open file: %s: %s' % (
+ filename_to_ui(value),
+ get_streerror(e),
+ ), param, ctx)
+
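+# Usage sketch (illustrative): reading from a file or stdin via ``-`` and
+# writing through an atomic temporary file, per the File behaviour above:
+#
+#     @click.argument('src', type=click.File('rb'))
+#     @click.argument('dst', type=click.File('wb', atomic=True))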
+
+class Path(ParamType):
+ """The path type is similar to the :class:`File` type but it performs
+ different checks. First of all, instead of returning an open file
+ handle it returns just the filename. Secondly, it can perform various
+ basic checks about what the file or directory should be.
+
+ .. versionchanged:: 6.0
+ `allow_dash` was added.
+
+ :param exists: if set to true, the file or directory needs to exist for
+ this value to be valid. If this is not required and a
+ file does indeed not exist, then all further checks are
+ silently skipped.
+ :param file_okay: controls if a file is a possible value.
+ :param dir_okay: controls if a directory is a possible value.
+ :param writable: if true, a writable check is performed.
+ :param readable: if true, a readable check is performed.
+ :param resolve_path: if this is true, then the path is fully resolved
+ before the value is passed onwards. This means
+ that it's absolute and symlinks are resolved.
+ :param allow_dash: If this is set to `True`, a single dash to indicate
+ standard streams is permitted.
+ :param path_type: optionally a string type that should be used to
+ represent the path. The default is `None` which
+ means the return value will be either bytes or
+ unicode depending on what makes most sense given the
+ input data Click deals with.
+ """
+ envvar_list_splitter = os.path.pathsep
+
+ def __init__(self, exists=False, file_okay=True, dir_okay=True,
+ writable=False, readable=True, resolve_path=False,
+ allow_dash=False, path_type=None):
+ self.exists = exists
+ self.file_okay = file_okay
+ self.dir_okay = dir_okay
+ self.writable = writable
+ self.readable = readable
+ self.resolve_path = resolve_path
+ self.allow_dash = allow_dash
+ self.type = path_type
+
+ if self.file_okay and not self.dir_okay:
+ self.name = 'file'
+ self.path_type = 'File'
+ elif self.dir_okay and not self.file_okay:
+ self.name = 'directory'
+ self.path_type = 'Directory'
+ else:
+ self.name = 'path'
+ self.path_type = 'Path'
+
+ def coerce_path_result(self, rv):
+ if self.type is not None and not isinstance(rv, self.type):
+ if self.type is text_type:
+ rv = rv.decode(get_filesystem_encoding())
+ else:
+ rv = rv.encode(get_filesystem_encoding())
+ return rv
+
+ def convert(self, value, param, ctx):
+ rv = value
+
+ is_dash = self.file_okay and self.allow_dash and rv in (b'-', '-')
+
+ if not is_dash:
+ if self.resolve_path:
+ rv = os.path.realpath(rv)
+
+ try:
+ st = os.stat(rv)
+ except OSError:
+ if not self.exists:
+ return self.coerce_path_result(rv)
+ self.fail('%s "%s" does not exist.' % (
+ self.path_type,
+ filename_to_ui(value)
+ ), param, ctx)
+
+ if not self.file_okay and stat.S_ISREG(st.st_mode):
+ self.fail('%s "%s" is a file.' % (
+ self.path_type,
+ filename_to_ui(value)
+ ), param, ctx)
+ if not self.dir_okay and stat.S_ISDIR(st.st_mode):
+ self.fail('%s "%s" is a directory.' % (
+ self.path_type,
+ filename_to_ui(value)
+ ), param, ctx)
+ if self.writable and not os.access(value, os.W_OK):
+ self.fail('%s "%s" is not writable.' % (
+ self.path_type,
+ filename_to_ui(value)
+ ), param, ctx)
+ if self.readable and not os.access(value, os.R_OK):
+ self.fail('%s "%s" is not readable.' % (
+ self.path_type,
+ filename_to_ui(value)
+ ), param, ctx)
+
+ return self.coerce_path_result(rv)
+
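+# Usage sketch (illustrative): requiring an existing file path with the Path
+# type without opening it:
+#
+#     @click.argument('config', type=click.Path(exists=True, dir_okay=False))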
+
+class Tuple(CompositeParamType):
+ """The default behavior of Click is to apply a type on a value directly.
+ This works well in most cases, except for when `nargs` is set to a fixed
+ count and different types should be used for different items. In this
+ case the :class:`Tuple` type can be used. This type can only be used
+ if `nargs` is set to a fixed number.
+
+ For more information see :ref:`tuple-type`.
+
+ This can be selected by using a Python tuple literal as a type.
+
+ :param types: a list of types that should be used for the tuple items.
+ """
+
+ def __init__(self, types):
+ self.types = [convert_type(ty) for ty in types]
+
+ @property
+ def name(self):
+ return "<" + " ".join(ty.name for ty in self.types) + ">"
+
+ @property
+ def arity(self):
+ return len(self.types)
+
+ def convert(self, value, param, ctx):
+ if len(value) != len(self.types):
+ raise TypeError('It would appear that nargs is set to conflict '
+ 'with the composite type arity.')
+ return tuple(ty(x, param, ctx) for ty, x in zip(self.types, value))
+
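+# Usage sketch (illustrative): the Tuple type is selected implicitly by
+# passing a Python tuple literal as the type, as noted above:
+#
+#     @click.option('--item', type=(str, int))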
+
+def convert_type(ty, default=None):
+ """Converts a callable or python ty into the most appropriate param
+ ty.
+ """
+ guessed_type = False
+ if ty is None and default is not None:
+ if isinstance(default, tuple):
+ ty = tuple(map(type, default))
+ else:
+ ty = type(default)
+ guessed_type = True
+
+ if isinstance(ty, tuple):
+ return Tuple(ty)
+ if isinstance(ty, ParamType):
+ return ty
+ if ty is text_type or ty is str or ty is None:
+ return STRING
+ if ty is int:
+ return INT
+ # Booleans are only okay if not guessed. This is done because for
+ # flags the default value is actually a bit of a lie in that it
+ # indicates which of the flags is the one we want. See get_default()
+ # for more information.
+ if ty is bool and not guessed_type:
+ return BOOL
+ if ty is float:
+ return FLOAT
+ if guessed_type:
+ return STRING
+
+ # Catch a common mistake
+ if __debug__:
+ try:
+ if issubclass(ty, ParamType):
+ raise AssertionError('Attempted to use an uninstantiated '
+ 'parameter type (%s).' % ty)
+ except TypeError:
+ pass
+ return FuncParamType(ty)
+
+
+#: A dummy parameter type that just does nothing. From a user's
+#: perspective this appears to just be the same as `STRING` but internally
+#: no string conversion takes place. This is necessary to achieve the
+#: same bytes/unicode behavior on Python 2/3 in situations where you want
+#: to not convert argument types. This is usually useful when working
+#: with file paths as they can appear in bytes and unicode.
+#:
+#: For path related uses the :class:`Path` type is a better choice but
+#: there are situations where an unprocessed type is useful which is why
+#: it is provided.
+#:
+#: .. versionadded:: 4.0
+UNPROCESSED = UnprocessedParamType()
+
+#: A unicode string parameter type which is the implicit default. This
+#: can also be selected by using ``str`` as type.
+STRING = StringParamType()
+
+#: An integer parameter. This can also be selected by using ``int`` as
+#: type.
+INT = IntParamType()
+
+#: A floating point value parameter. This can also be selected by using
+#: ``float`` as type.
+FLOAT = FloatParamType()
+
+#: A boolean parameter. This is the default for boolean flags. This can
+#: also be selected by using ``bool`` as a type.
+BOOL = BoolParamType()
+
+#: A UUID parameter.
+UUID = UUIDParameterType()
diff --git a/appWSM/lib/python2.7/site-packages/click/utils.py b/appWSM/lib/python2.7/site-packages/click/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..eee626d3fd0153486345605815ca5f748eb63e7e
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/click/utils.py
@@ -0,0 +1,415 @@
+import os
+import sys
+
+from .globals import resolve_color_default
+
+from ._compat import text_type, open_stream, get_filesystem_encoding, \
+ get_streerror, string_types, PY2, binary_streams, text_streams, \
+ filename_to_ui, auto_wrap_for_ansi, strip_ansi, should_strip_ansi, \
+ _default_text_stdout, _default_text_stderr, is_bytes, WIN
+
+if not PY2:
+ from ._compat import _find_binary_writer
+elif WIN:
+ from ._winconsole import _get_windows_argv, \
+ _hash_py_argv, _initial_argv_hash
+
+
+echo_native_types = string_types + (bytes, bytearray)
+
+
+def _posixify(name):
+ return '-'.join(name.split()).lower()
+
+
+def safecall(func):
+ """Wraps a function so that it swallows exceptions."""
+ def wrapper(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except Exception:
+ pass
+ return wrapper
+
+
+def make_str(value):
+ """Converts a value into a valid string."""
+ if isinstance(value, bytes):
+ try:
+ return value.decode(get_filesystem_encoding())
+ except UnicodeError:
+ return value.decode('utf-8', 'replace')
+ return text_type(value)
+
+
+def make_default_short_help(help, max_length=45):
+ words = help.split()
+ total_length = 0
+ result = []
+ done = False
+
+ for word in words:
+ if word[-1:] == '.':
+ done = True
+ new_length = result and 1 + len(word) or len(word)
+ if total_length + new_length > max_length:
+ result.append('...')
+ done = True
+ else:
+ if result:
+ result.append(' ')
+ result.append(word)
+ if done:
+ break
+ total_length += new_length
+
+ return ''.join(result)
+
+
+class LazyFile(object):
+ """A lazy file works like a regular file but it does not fully open
+ the file but it does perform some basic checks early to see if the
+ filename parameter does make sense. This is useful for safely opening
+ files for writing.
+ """
+
+ def __init__(self, filename, mode='r', encoding=None, errors='strict',
+ atomic=False):
+ self.name = filename
+ self.mode = mode
+ self.encoding = encoding
+ self.errors = errors
+ self.atomic = atomic
+
+ if filename == '-':
+ self._f, self.should_close = open_stream(filename, mode,
+ encoding, errors)
+ else:
+ if 'r' in mode:
+ # Open and close the file in case we're opening it for
+ # reading so that we can catch at least some errors in
+ # some cases early.
+ open(filename, mode).close()
+ self._f = None
+ self.should_close = True
+
+ def __getattr__(self, name):
+ return getattr(self.open(), name)
+
+ def __repr__(self):
+ if self._f is not None:
+ return repr(self._f)
+ return '<unopened file %r %s>' % (self.name, self.mode)
+
+ def open(self):
+ """Opens the file if it's not yet open. This call might fail with
+ a :exc:`FileError`. Not handling this error will produce an error
+ that Click shows.
+ """
+ if self._f is not None:
+ return self._f
+ try:
+ rv, self.should_close = open_stream(self.name, self.mode,
+ self.encoding,
+ self.errors,
+ atomic=self.atomic)
+ except (IOError, OSError) as e:
+ from .exceptions import FileError
+ raise FileError(self.name, hint=get_streerror(e))
+ self._f = rv
+ return rv
+
+ def close(self):
+ """Closes the underlying file, no matter what."""
+ if self._f is not None:
+ self._f.close()
+
+ def close_intelligently(self):
+ """This function only closes the file if it was opened by the lazy
+ file wrapper. For instance this will never close stdin.
+ """
+ if self.should_close:
+ self.close()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, tb):
+ self.close_intelligently()
+
+ def __iter__(self):
+ self.open()
+ return iter(self._f)
+
+
+class KeepOpenFile(object):
+
+ def __init__(self, file):
+ self._file = file
+
+ def __getattr__(self, name):
+ return getattr(self._file, name)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, tb):
+ pass
+
+ def __repr__(self):
+ return repr(self._file)
+
+ def __iter__(self):
+ return iter(self._file)
+
+
+def echo(message=None, file=None, nl=True, err=False, color=None):
+ """Prints a message plus a newline to the given file or stdout. On
+ first sight, this looks like the print function, but it has improved
+ support for handling Unicode and binary data that does not fail no
+ matter how badly configured the system is.
+
+ Primarily it means that you can print binary data as well as Unicode
+ data on both 2.x and 3.x to the given file in the most appropriate way
+ possible. This is a very carefree function in that it will try its
+ best to not fail. As of Click 6.0 this includes support for unicode
+ output on the Windows console.
+
+ In addition to that, if `colorama`_ is installed, the echo function will
+ also support clever handling of ANSI codes. Essentially it will then
+ do the following:
+
+ - add transparent handling of ANSI color codes on Windows.
+ - hide ANSI codes automatically if the destination file is not a
+ terminal.
+
+ .. _colorama: http://pypi.python.org/pypi/colorama
+
+ .. versionchanged:: 6.0
+ As of Click 6.0 the echo function will properly support unicode
+ output on the Windows console. Note that click does not modify
+ the interpreter in any way which means that `sys.stdout` or the
+ print statement or function will still not provide unicode support.
+
+ .. versionchanged:: 2.0
+ Starting with version 2.0 of Click, the echo function will work
+ with colorama if it's installed.
+
+ .. versionadded:: 3.0
+ The `err` parameter was added.
+
+ .. versionchanged:: 4.0
+ Added the `color` flag.
+
+ :param message: the message to print
+ :param file: the file to write to (defaults to ``stdout``)
+ :param err: if set to true the file defaults to ``stderr`` instead of
+ ``stdout``. This is faster and easier than calling
+ :func:`get_text_stderr` yourself.
+ :param nl: if set to `True` (the default) a newline is printed afterwards.
+ :param color: controls if the terminal supports ANSI colors or not. The
+ default is autodetection.
+ """
+ if file is None:
+ if err:
+ file = _default_text_stderr()
+ else:
+ file = _default_text_stdout()
+
+ # Convert non bytes/text into the native string type.
+ if message is not None and not isinstance(message, echo_native_types):
+ message = text_type(message)
+
+ if nl:
+ message = message or u''
+ if isinstance(message, text_type):
+ message += u'\n'
+ else:
+ message += b'\n'
+
+ # If there is a message, and we're in Python 3, and the value looks
+ # like bytes, we manually need to find the binary stream and write the
+ # message in there. This is done separately so that most stream
+ # types will work as you would expect. Eg: you can write to StringIO
+ # for other cases.
+ if message and not PY2 and is_bytes(message):
+ binary_file = _find_binary_writer(file)
+ if binary_file is not None:
+ file.flush()
+ binary_file.write(message)
+ binary_file.flush()
+ return
+
+ # ANSI-style support. If there is no message or we are dealing with
+ # bytes, nothing happens. If we are connected to a file we want
+ # to strip colors. If we are on windows we either wrap the stream
+ # to strip the color or we use the colorama support to translate the
+ # ansi codes to API calls.
+ if message and not is_bytes(message):
+ color = resolve_color_default(color)
+ if should_strip_ansi(file, color):
+ message = strip_ansi(message)
+ elif WIN:
+ if auto_wrap_for_ansi is not None:
+ file = auto_wrap_for_ansi(file)
+ elif not color:
+ message = strip_ansi(message)
+
+ if message:
+ file.write(message)
+ file.flush()
+
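+# Usage sketch (illustrative): echo handles text to stderr as well as raw
+# bytes routed to the binary stream by the logic above:
+#
+#     click.echo('Something went wrong', err=True)
+#     click.echo(b'\xe2\x98\x83', nl=False)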
+
+def get_binary_stream(name):
+ """Returns a system stream for byte processing. This essentially
+ returns the stream from the sys module with the given name but it
+ solves some compatibility issues between different Python versions.
+ Primarily this function is necessary for getting binary streams on
+ Python 3.
+
+ :param name: the name of the stream to open. Valid names are ``'stdin'``,
+ ``'stdout'`` and ``'stderr'``
+ """
+ opener = binary_streams.get(name)
+ if opener is None:
+ raise TypeError('Unknown standard stream %r' % name)
+ return opener()
+
+
+def get_text_stream(name, encoding=None, errors='strict'):
+ """Returns a system stream for text processing. This usually returns
+ a wrapped stream around a binary stream returned from
+ :func:`get_binary_stream` but it also can take shortcuts on Python 3
+ for already correctly configured streams.
+
+ :param name: the name of the stream to open. Valid names are ``'stdin'``,
+ ``'stdout'`` and ``'stderr'``
+ :param encoding: overrides the detected default encoding.
+ :param errors: overrides the default error mode.
+ """
+ opener = text_streams.get(name)
+ if opener is None:
+ raise TypeError('Unknown standard stream %r' % name)
+ return opener(encoding, errors)
+
+
+def open_file(filename, mode='r', encoding=None, errors='strict',
+ lazy=False, atomic=False):
+ """This is similar to how the :class:`File` works but for manual
+ usage. Files are opened non lazy by default. This can open regular
+ files as well as stdin/stdout if ``'-'`` is passed.
+
+ If stdin/stdout is returned the stream is wrapped so that the context
+ manager will not close the stream accidentally. This makes it possible
+ to always use the function like this without having to worry about
+ accidentally closing a standard stream::
+
+ with open_file(filename) as f:
+ ...
+
+ .. versionadded:: 3.0
+
+ :param filename: the name of the file to open (or ``'-'`` for stdin/stdout).
+ :param mode: the mode in which to open the file.
+ :param encoding: the encoding to use.
+ :param errors: the error handling for this file.
+ :param lazy: can be flipped to true to open the file lazily.
+ :param atomic: in atomic mode writes go into a temporary file and it's
+ moved on close.
+ """
+ if lazy:
+ return LazyFile(filename, mode, encoding, errors, atomic=atomic)
+ f, should_close = open_stream(filename, mode, encoding, errors,
+ atomic=atomic)
+ if not should_close:
+ f = KeepOpenFile(f)
+ return f
+
+
+def get_os_args():
+ """This returns the argument part of sys.argv in the most appropriate
+ form for processing. What this means is that this return value is in
+ a format that works for Click to process but does not necessarily
+ correspond well to what's actually standard for the interpreter.
+
+ On most environments the return value is ``sys.argv[1:]`` unchanged.
+ However if you are on Windows and running Python 2 the return value
+ will actually be a list of unicode strings instead because the
+ default behavior on that platform otherwise will not be able to
+ carry all possible values that sys.argv can have.
+
+ .. versionadded:: 6.0
+ """
+ # We can only extract the unicode argv if sys.argv has not been
+ # changed since the startup of the application.
+ if PY2 and WIN and _initial_argv_hash == _hash_py_argv():
+ return _get_windows_argv()
+ return sys.argv[1:]
+
+
+def format_filename(filename, shorten=False):
+ """Formats a filename for user display. The main purpose of this
+ function is to ensure that the filename can be displayed at all. This
+ will decode the filename to unicode if necessary in a way that it will
+ not fail. Optionally, it can shorten the filename to not include the
+ full path to the filename.
+
+ :param filename: formats a filename for UI display. This will also convert
+ the filename into unicode without failing.
+ :param shorten: this optionally shortens the filename to strip off the
+ path that leads up to it.
+ """
+ if shorten:
+ filename = os.path.basename(filename)
+ return filename_to_ui(filename)
+
+
+def get_app_dir(app_name, roaming=True, force_posix=False):
+ r"""Returns the config folder for the application. The default behavior
+ is to return whatever is most appropriate for the operating system.
+
+ To give you an idea, for an app called ``"Foo Bar"``, something like
+ the following folders could be returned:
+
+ Mac OS X:
+ ``~/Library/Application Support/Foo Bar``
+ Mac OS X (POSIX):
+ ``~/.foo-bar``
+ Unix:
+ ``~/.config/foo-bar``
+ Unix (POSIX):
+ ``~/.foo-bar``
+ Win XP (roaming):
+ ``C:\Documents and Settings\<user>\Local Settings\Application Data\Foo Bar``
+ Win XP (not roaming):
+ ``C:\Documents and Settings\<user>\Application Data\Foo Bar``
+ Win 7 (roaming):
+ ``C:\Users\<user>\AppData\Roaming\Foo Bar``
+ Win 7 (not roaming):
+ ``C:\Users\<user>\AppData\Local\Foo Bar``
+
+ .. versionadded:: 2.0
+
+ :param app_name: the application name. This should be properly capitalized
+ and can contain whitespace.
+ :param roaming: controls if the folder should be roaming or not on Windows.
+ Has no effect otherwise.
+ :param force_posix: if this is set to `True` then on any POSIX system the
+ folder will be stored in the home folder with a leading
+ dot instead of the XDG config home or darwin's
+ application support folder.
+ """
+ if WIN:
+ key = roaming and 'APPDATA' or 'LOCALAPPDATA'
+ folder = os.environ.get(key)
+ if folder is None:
+ folder = os.path.expanduser('~')
+ return os.path.join(folder, app_name)
+ if force_posix:
+ return os.path.join(os.path.expanduser('~/.' + _posixify(app_name)))
+ if sys.platform == 'darwin':
+ return os.path.join(os.path.expanduser(
+ '~/Library/Application Support'), app_name)
+ return os.path.join(
+ os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config')),
+ _posixify(app_name))
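+# Usage sketch (illustrative): locating a per-user config file for an app
+# named "Foo Bar", matching the folder examples above:
+#
+#     cfg = os.path.join(get_app_dir('Foo Bar'), 'config.ini')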
diff --git a/appWSM/lib/python2.7/site-packages/dateutil/__init__.py b/appWSM/lib/python2.7/site-packages/dateutil/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..796ef3dfcac2ba1f348d9141400d13fc6741213b
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/dateutil/__init__.py
@@ -0,0 +1,2 @@
+# -*- coding: utf-8 -*-
+from ._version import VERSION as __version__
diff --git a/appWSM/lib/python2.7/site-packages/dateutil/_common.py b/appWSM/lib/python2.7/site-packages/dateutil/_common.py
new file mode 100644
index 0000000000000000000000000000000000000000..e8b4af71e53d9a8a2afb916a323038e9c5b71c9d
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/dateutil/_common.py
@@ -0,0 +1,34 @@
+"""
+Common code used in multiple modules.
+"""
+
+
+class weekday(object):
+ __slots__ = ["weekday", "n"]
+
+ def __init__(self, weekday, n=None):
+ self.weekday = weekday
+ self.n = n
+
+ def __call__(self, n):
+ if n == self.n:
+ return self
+ else:
+ return self.__class__(self.weekday, n)
+
+ def __eq__(self, other):
+ try:
+ if self.weekday != other.weekday or self.n != other.n:
+ return False
+ except AttributeError:
+ return False
+ return True
+
+ __hash__ = None
+
+ def __repr__(self):
+ s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
+ if not self.n:
+ return s
+ else:
+ return "%s(%+d)" % (s, self.n)
diff --git a/appWSM/lib/python2.7/site-packages/dateutil/_version.py b/appWSM/lib/python2.7/site-packages/dateutil/_version.py
new file mode 100644
index 0000000000000000000000000000000000000000..c1a0357babf5e8d465d9df60b2a47a33e5bffbb5
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/dateutil/_version.py
@@ -0,0 +1,10 @@
+"""
+Contains information about the dateutil version.
+"""
+
+VERSION_MAJOR = 2
+VERSION_MINOR = 6
+VERSION_PATCH = 1
+
+VERSION_TUPLE = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
+VERSION = '.'.join(map(str, VERSION_TUPLE))
diff --git a/appWSM/lib/python2.7/site-packages/dateutil/easter.py b/appWSM/lib/python2.7/site-packages/dateutil/easter.py
new file mode 100644
index 0000000000000000000000000000000000000000..e4def97f966c83d08878da16ff105eb857b5fa4b
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/dateutil/easter.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+"""
+This module offers a generic easter computing method for any given year, using
+Western, Orthodox or Julian algorithms.
+"""
+
+import datetime
+
+__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"]
+
+EASTER_JULIAN = 1
+EASTER_ORTHODOX = 2
+EASTER_WESTERN = 3
+
+
+def easter(year, method=EASTER_WESTERN):
+ """
+ This method was ported from the work done by GM Arts,
+ on top of the algorithm by Claus Tondering, which was
+ based in part on the algorithm of Ouding (1940), as
+ quoted in "Explanatory Supplement to the Astronomical
+ Almanac", P. Kenneth Seidelmann, editor.
+
+ This algorithm implements three different easter
+ calculation methods:
+
+ 1 - Original calculation in Julian calendar, valid in
+ dates after 326 AD
+ 2 - Original method, with date converted to Gregorian
+ calendar, valid in years 1583 to 4099
+ 3 - Revised method, in Gregorian calendar, valid in
+ years 1583 to 4099 as well
+
+ These methods are represented by the constants:
+
+ * ``EASTER_JULIAN = 1``
+ * ``EASTER_ORTHODOX = 2``
+ * ``EASTER_WESTERN = 3``
+
+ The default method is method 3.
+
+ More about the algorithm may be found at:
+
+ http://users.chariot.net.au/~gmarts/eastalg.htm
+
+ and
+
+ http://www.tondering.dk/claus/calendar.html
+
+ """
+
+ if not (1 <= method <= 3):
+ raise ValueError("invalid method")
+
+ # g - Golden year - 1
+ # c - Century
+ # h - (23 - Epact) mod 30
+ # i - Number of days from March 21 to Paschal Full Moon
+ # j - Weekday for PFM (0=Sunday, etc)
+ # p - Number of days from March 21 to Sunday on or before PFM
+ # (-6 to 28 methods 1 & 3, to 56 for method 2)
+ # e - Extra days to add for method 2 (converting Julian
+ # date to Gregorian date)
+
+ y = year
+ g = y % 19
+ e = 0
+ if method < 3:
+ # Old method
+ i = (19*g + 15) % 30
+ j = (y + y//4 + i) % 7
+ if method == 2:
+ # Extra dates to convert Julian to Gregorian date
+ e = 10
+ if y > 1600:
+ e = e + y//100 - 16 - (y//100 - 16)//4
+ else:
+ # New method
+ c = y//100
+ h = (c - c//4 - (8*c + 13)//25 + 19*g + 15) % 30
+ i = h - (h//28)*(1 - (h//28)*(29//(h + 1))*((21 - g)//11))
+ j = (y + y//4 + i + 2 - c + c//4) % 7
+
+ # p can be from -6 to 56 corresponding to dates 22 March to 23 May
+ # (later dates apply to method 2, although 23 May never actually occurs)
+ p = i - j + e
+ d = 1 + (p + 27 + (p + 6)//40) % 31
+ m = 3 + (p + 26)//30
+ return datetime.date(int(y), int(m), int(d))
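+# Usage sketch (illustrative): Western Easter for 2018 falls on April 1st:
+#
+#     >>> easter(2018)
+#     datetime.date(2018, 4, 1)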
diff --git a/appWSM/lib/python2.7/site-packages/dateutil/parser.py b/appWSM/lib/python2.7/site-packages/dateutil/parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..595331f641c81da518d91ae500be15b29d2d7d66
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/dateutil/parser.py
@@ -0,0 +1,1374 @@
+# -*- coding: utf-8 -*-
+"""
+This module offers a generic date/time string parser which is able to parse
+most known formats to represent a date and/or time.
+
+This module attempts to be forgiving with regards to unlikely input formats,
+returning a datetime object even for dates which are ambiguous. If an element
+of a date/time stamp is omitted, the following rules are applied:
+- If AM or PM is left unspecified, a 24-hour clock is assumed, however, an hour
+ on a 12-hour clock (``0 <= hour <= 12``) *must* be specified if AM or PM is
+ specified.
+- If a time zone is omitted, a timezone-naive datetime is returned.
+
+If any other elements are missing, they are taken from the
+:class:`datetime.datetime` object passed to the parameter ``default``. If this
+results in a day number exceeding the valid number of days per month, the
+value falls back to the end of the month.
+
+Additional resources about date/time string formats can be found below:
+
+- `A summary of the international standard date and time notation
+ `_
+- `W3C Date and Time Formats `_
+- `Time Formats (Planetary Rings Node) `_
+- `CPAN ParseDate module
+ `_
+- `Java SimpleDateFormat Class
+ `_
+"""
+from __future__ import unicode_literals
+
+import datetime
+import string
+import time
+import collections
+import re
+from io import StringIO
+from calendar import monthrange
+
+from six import text_type, binary_type, integer_types
+
+from . import relativedelta
+from . import tz
+
+__all__ = ["parse", "parserinfo"]
+
+
+class _timelex(object):
+ # Fractional seconds are sometimes split by a comma
+ _split_decimal = re.compile("([.,])")
+
+ def __init__(self, instream):
+ if isinstance(instream, binary_type):
+ instream = instream.decode()
+
+ if isinstance(instream, text_type):
+ instream = StringIO(instream)
+
+ if getattr(instream, 'read', None) is None:
+ raise TypeError('Parser must be a string or character stream, not '
+ '{itype}'.format(itype=instream.__class__.__name__))
+
+ self.instream = instream
+ self.charstack = []
+ self.tokenstack = []
+ self.eof = False
+
+ def get_token(self):
+ """
+ This function breaks the time string into lexical units (tokens), which
+ can be parsed by the parser. Lexical units are demarcated by changes in
+ the character set, so any continuous string of letters is considered
+ one unit, any continuous string of numbers is considered one unit.
+
+ The main complication arises from the fact that dots ('.') can be used
+ either as separators (e.g. "Sep.20.2009") or as decimal points (e.g.
+ "4:30:21.447"). As such, it is necessary to read the full context of
+ any dot-separated strings before breaking it into tokens; as such, this
+ function maintains a "token stack", for when the ambiguous context
+ demands that multiple tokens be parsed at once.
+ """
+ if self.tokenstack:
+ return self.tokenstack.pop(0)
+
+ seenletters = False
+ token = None
+ state = None
+
+ while not self.eof:
+ # We only realize that we've reached the end of a token when we
+ # find a character that's not part of the current token - since
+ # that character may be part of the next token, it's stored in the
+ # charstack.
+ if self.charstack:
+ nextchar = self.charstack.pop(0)
+ else:
+ nextchar = self.instream.read(1)
+ while nextchar == '\x00':
+ nextchar = self.instream.read(1)
+
+ if not nextchar:
+ self.eof = True
+ break
+ elif not state:
+ # First character of the token - determines if we're starting
+ # to parse a word, a number or something else.
+ token = nextchar
+ if self.isword(nextchar):
+ state = 'a'
+ elif self.isnum(nextchar):
+ state = '0'
+ elif self.isspace(nextchar):
+ token = ' '
+ break # emit token
+ else:
+ break # emit token
+ elif state == 'a':
+ # If we've already started reading a word, we keep reading
+ # letters until we find something that's not part of a word.
+ seenletters = True
+ if self.isword(nextchar):
+ token += nextchar
+ elif nextchar == '.':
+ token += nextchar
+ state = 'a.'
+ else:
+ self.charstack.append(nextchar)
+ break # emit token
+ elif state == '0':
+ # If we've already started reading a number, we keep reading
+ # numbers until we find something that doesn't fit.
+ if self.isnum(nextchar):
+ token += nextchar
+ elif nextchar == '.' or (nextchar == ',' and len(token) >= 2):
+ token += nextchar
+ state = '0.'
+ else:
+ self.charstack.append(nextchar)
+ break # emit token
+ elif state == 'a.':
+ # If we've seen some letters and a dot separator, continue
+ # parsing, and the tokens will be broken up later.
+ seenletters = True
+ if nextchar == '.' or self.isword(nextchar):
+ token += nextchar
+ elif self.isnum(nextchar) and token[-1] == '.':
+ token += nextchar
+ state = '0.'
+ else:
+ self.charstack.append(nextchar)
+ break # emit token
+ elif state == '0.':
+ # If we've seen at least one dot separator, keep going, we'll
+ # break up the tokens later.
+ if nextchar == '.' or self.isnum(nextchar):
+ token += nextchar
+ elif self.isword(nextchar) and token[-1] == '.':
+ token += nextchar
+ state = 'a.'
+ else:
+ self.charstack.append(nextchar)
+ break # emit token
+
+ if (state in ('a.', '0.') and (seenletters or token.count('.') > 1 or
+ token[-1] in '.,')):
+ l = self._split_decimal.split(token)
+ token = l[0]
+ for tok in l[1:]:
+ if tok:
+ self.tokenstack.append(tok)
+
+ if state == '0.' and token.count('.') == 0:
+ token = token.replace(',', '.')
+
+ return token
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ token = self.get_token()
+ if token is None:
+ raise StopIteration
+
+ return token
+
+ def next(self):
+ return self.__next__() # Python 2.x support
+
+ @classmethod
+ def split(cls, s):
+ return list(cls(s))
+
+ @classmethod
+ def isword(cls, nextchar):
+ """ Whether or not the next character is part of a word """
+ return nextchar.isalpha()
+
+ @classmethod
+ def isnum(cls, nextchar):
+ """ Whether the next character is part of a number """
+ return nextchar.isdigit()
+
+ @classmethod
+ def isspace(cls, nextchar):
+ """ Whether the next character is whitespace """
+ return nextchar.isspace()
+
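+# Illustrative sketch (not part of the upstream module): given the rules in
+# get_token() above, a mixed date/time string is expected to tokenize roughly
+# as follows -- dot-separated runs are re-split, decimal numbers stay whole:
+#
+#     _timelex.split("Sep.20.2009 4:30:21.447")
+#     # -> ['Sep', '.', '20', '.', '2009', ' ', '4', ':', '30', ':', '21.447']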
+
+class _resultbase(object):
+
+ def __init__(self):
+ for attr in self.__slots__:
+ setattr(self, attr, None)
+
+ def _repr(self, classname):
+ l = []
+ for attr in self.__slots__:
+ value = getattr(self, attr)
+ if value is not None:
+ l.append("%s=%s" % (attr, repr(value)))
+ return "%s(%s)" % (classname, ", ".join(l))
+
+ def __len__(self):
+ return (sum(getattr(self, attr) is not None
+ for attr in self.__slots__))
+
+ def __repr__(self):
+ return self._repr(self.__class__.__name__)
+
+
+class parserinfo(object):
+ """
+ Class which handles what inputs are accepted. Subclass this to customize
+ the language and acceptable values for each parameter.
+
+ :param dayfirst:
+ Whether to interpret the first value in an ambiguous 3-integer date
+ (e.g. 01/05/09) as the day (``True``) or month (``False``). If
+ ``yearfirst`` is set to ``True``, this distinguishes between YDM
+ and YMD. Default is ``False``.
+
+ :param yearfirst:
+ Whether to interpret the first value in an ambiguous 3-integer date
+ (e.g. 01/05/09) as the year. If ``True``, the first number is taken
+ to be the year, otherwise the last number is taken to be the year.
+ Default is ``False``.
+ """
+
+ # m from a.m/p.m, t from ISO T separator
+ JUMP = [" ", ".", ",", ";", "-", "/", "'",
+ "at", "on", "and", "ad", "m", "t", "of",
+ "st", "nd", "rd", "th"]
+
+ WEEKDAYS = [("Mon", "Monday"),
+ ("Tue", "Tuesday"),
+ ("Wed", "Wednesday"),
+ ("Thu", "Thursday"),
+ ("Fri", "Friday"),
+ ("Sat", "Saturday"),
+ ("Sun", "Sunday")]
+ MONTHS = [("Jan", "January"),
+ ("Feb", "February"),
+ ("Mar", "March"),
+ ("Apr", "April"),
+ ("May", "May"),
+ ("Jun", "June"),
+ ("Jul", "July"),
+ ("Aug", "August"),
+ ("Sep", "Sept", "September"),
+ ("Oct", "October"),
+ ("Nov", "November"),
+ ("Dec", "December")]
+ HMS = [("h", "hour", "hours"),
+ ("m", "minute", "minutes"),
+ ("s", "second", "seconds")]
+ AMPM = [("am", "a"),
+ ("pm", "p")]
+ UTCZONE = ["UTC", "GMT", "Z"]
+ PERTAIN = ["of"]
+ TZOFFSET = {}
+
+ def __init__(self, dayfirst=False, yearfirst=False):
+ self._jump = self._convert(self.JUMP)
+ self._weekdays = self._convert(self.WEEKDAYS)
+ self._months = self._convert(self.MONTHS)
+ self._hms = self._convert(self.HMS)
+ self._ampm = self._convert(self.AMPM)
+ self._utczone = self._convert(self.UTCZONE)
+ self._pertain = self._convert(self.PERTAIN)
+
+ self.dayfirst = dayfirst
+ self.yearfirst = yearfirst
+
+ self._year = time.localtime().tm_year
+ self._century = self._year // 100 * 100
+
+ def _convert(self, lst):
+ dct = {}
+ for i, v in enumerate(lst):
+ if isinstance(v, tuple):
+ for v in v:
+ dct[v.lower()] = i
+ else:
+ dct[v.lower()] = i
+ return dct
+
+ def jump(self, name):
+ return name.lower() in self._jump
+
+ def weekday(self, name):
+ if len(name) >= min(len(n) for n in self._weekdays.keys()):
+ try:
+ return self._weekdays[name.lower()]
+ except KeyError:
+ pass
+ return None
+
+ def month(self, name):
+ if len(name) >= min(len(n) for n in self._months.keys()):
+ try:
+ return self._months[name.lower()] + 1
+ except KeyError:
+ pass
+ return None
+
+ def hms(self, name):
+ try:
+ return self._hms[name.lower()]
+ except KeyError:
+ return None
+
+ def ampm(self, name):
+ try:
+ return self._ampm[name.lower()]
+ except KeyError:
+ return None
+
+ def pertain(self, name):
+ return name.lower() in self._pertain
+
+ def utczone(self, name):
+ return name.lower() in self._utczone
+
+ def tzoffset(self, name):
+ if name in self._utczone:
+ return 0
+
+ return self.TZOFFSET.get(name)
+
+ def convertyear(self, year, century_specified=False):
+ if year < 100 and not century_specified:
+ year += self._century
+ if abs(year - self._year) >= 50:
+ if year < self._year:
+ year += 100
+ else:
+ year -= 100
+ return year
+
+ def validate(self, res):
+ # move to info
+ if res.year is not None:
+ res.year = self.convertyear(res.year, res.century_specified)
+
+ if res.tzoffset == 0 and not res.tzname or res.tzname == 'Z':
+ res.tzname = "UTC"
+ res.tzoffset = 0
+ elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname):
+ res.tzoffset = 0
+ return True
+
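+# Illustrative sketch (not part of the upstream module): two-digit years are
+# widened by parserinfo.convertyear() into the century within +/- 50 years of
+# the current year; assuming, for illustration only, a current year of 2017:
+#
+#     info = parserinfo()
+#     info.convertyear(9)     # -> 2009
+#     info.convertyear(69)    # -> 1969   (2069 would be >= 50 years away)
+#     info.convertyear(1969)  # -> 1969   (already a full year, left alone)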
+
+class _ymd(list):
+ def __init__(self, tzstr, *args, **kwargs):
+ super(self.__class__, self).__init__(*args, **kwargs)
+ self.century_specified = False
+ self.tzstr = tzstr
+
+ @staticmethod
+ def token_could_be_year(token, year):
+ try:
+ return int(token) == year
+ except ValueError:
+ return False
+
+ @staticmethod
+ def find_potential_year_tokens(year, tokens):
+ return [token for token in tokens if _ymd.token_could_be_year(token, year)]
+
+ def find_probable_year_index(self, tokens):
+ """
+ Attempt to deduce whether a year below 100 lost its leading
+ zero-padding when the string was tokenized.
+ """
+ for index, token in enumerate(self):
+ potential_year_tokens = _ymd.find_potential_year_tokens(token, tokens)
+ if len(potential_year_tokens) == 1 and len(potential_year_tokens[0]) > 2:
+ return index
+
+ def append(self, val):
+ if hasattr(val, '__len__'):
+ if val.isdigit() and len(val) > 2:
+ self.century_specified = True
+ elif val > 100:
+ self.century_specified = True
+
+ super(self.__class__, self).append(int(val))
+
+ def resolve_ymd(self, mstridx, yearfirst, dayfirst):
+ len_ymd = len(self)
+ year, month, day = (None, None, None)
+
+ if len_ymd > 3:
+ raise ValueError("More than three YMD values")
+ elif len_ymd == 1 or (mstridx != -1 and len_ymd == 2):
+ # One member, or two members with a month string
+ if mstridx != -1:
+ month = self[mstridx]
+ del self[mstridx]
+
+ if len_ymd > 1 or mstridx == -1:
+ if self[0] > 31:
+ year = self[0]
+ else:
+ day = self[0]
+
+ elif len_ymd == 2:
+ # Two members with numbers
+ if self[0] > 31:
+ # 99-01
+ year, month = self
+ elif self[1] > 31:
+ # 01-99
+ month, year = self
+ elif dayfirst and self[1] <= 12:
+ # 13-01
+ day, month = self
+ else:
+ # 01-13
+ month, day = self
+
+ elif len_ymd == 3:
+ # Three members
+ if mstridx == 0:
+ month, day, year = self
+ elif mstridx == 1:
+ if self[0] > 31 or (yearfirst and self[2] <= 31):
+ # 99-Jan-01
+ year, month, day = self
+ else:
+ # 01-Jan-01
+ # Give precedence to day-first, since
+ # two-digit years are usually hand-written.
+ day, month, year = self
+
+ elif mstridx == 2:
+ # Month string is the last member -- an unusual ordering
+ if self[1] > 31:
+ # 01-99-Jan
+ day, year, month = self
+ else:
+ # 99-01-Jan
+ year, day, month = self
+
+ else:
+ if self[0] > 31 or \
+ self.find_probable_year_index(_timelex.split(self.tzstr)) == 0 or \
+ (yearfirst and self[1] <= 12 and self[2] <= 31):
+ # 99-01-01
+ if dayfirst and self[2] <= 12:
+ year, day, month = self
+ else:
+ year, month, day = self
+ elif self[0] > 12 or (dayfirst and self[1] <= 12):
+ # 13-01-01
+ day, month, year = self
+ else:
+ # 01-13-01
+ month, day, year = self
+
+ return year, month, day
+
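+# Illustrative sketch (not part of the upstream module): the dayfirst /
+# yearfirst flags steer how resolve_ymd() orders an ambiguous all-numeric
+# date, and the two-digit year is then widened by parserinfo.convertyear():
+#
+#     parse("01/05/09")                  # -> 2009-01-05  (month/day/year)
+#     parse("01/05/09", dayfirst=True)   # -> 2009-05-01  (day/month/year)
+#     parse("01/05/09", yearfirst=True)  # -> 2001-05-09  (year/month/day)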
+
+class parser(object):
+ def __init__(self, info=None):
+ self.info = info or parserinfo()
+
+ def parse(self, timestr, default=None, ignoretz=False, tzinfos=None, **kwargs):
+ """
+ Parse the date/time string into a :class:`datetime.datetime` object.
+
+ :param timestr:
+ Any date/time string using the supported formats.
+
+ :param default:
+ The default datetime object; if this is a datetime object and not
+ ``None``, elements specified in ``timestr`` replace the
+ corresponding elements in the default object.
+
+ :param ignoretz:
+ If set ``True``, time zones in parsed strings are ignored and a
+ naive :class:`datetime.datetime` object is returned.
+
+ :param tzinfos:
+ Additional time zone names / aliases which may be present in the
+ string. This argument maps time zone names (and optionally offsets
+ from those time zones) to time zones. This parameter can be a
+ dictionary with timezone aliases mapping time zone names to time
+ zones or a function taking two parameters (``tzname`` and
+ ``tzoffset``) and returning a time zone.
+
+ The timezones to which the names are mapped can be an integer
+ offset from UTC in seconds or a :class:`tzinfo` object.
+
+ .. doctest::
+ :options: +NORMALIZE_WHITESPACE
+
+ >>> from dateutil.parser import parse
+ >>> from dateutil.tz import gettz
+ >>> tzinfos = {"BRST": -10800, "CST": gettz("America/Chicago")}
+ >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
+ datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -10800))
+ >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos)
+ datetime.datetime(2012, 1, 19, 17, 21,
+ tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago'))
+
+ This parameter is ignored if ``ignoretz`` is set.
+
+ :param **kwargs:
+ Keyword arguments as passed to ``_parse()``.
+
+ :return:
+ Returns a :class:`datetime.datetime` object or, if the
+ ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the
+ first element being a :class:`datetime.datetime` object, the second
+ a tuple containing the fuzzy tokens.
+
+ :raises ValueError:
+ Raised for invalid or unknown string format, if the provided
+ :class:`tzinfo` is not in a valid format, or if an invalid date
+ would be created.
+
+ :raises TypeError:
+ Raised for non-string or character stream input.
+
+ :raises OverflowError:
+ Raised if the parsed date exceeds the largest valid C integer on
+ your system.
+ """
+
+ if default is None:
+ default = datetime.datetime.now().replace(hour=0, minute=0,
+ second=0, microsecond=0)
+
+ res, skipped_tokens = self._parse(timestr, **kwargs)
+
+ if res is None:
+ raise ValueError("Unknown string format")
+
+ if len(res) == 0:
+ raise ValueError("String does not contain a date.")
+
+ repl = {}
+ for attr in ("year", "month", "day", "hour",
+ "minute", "second", "microsecond"):
+ value = getattr(res, attr)
+ if value is not None:
+ repl[attr] = value
+
+ if 'day' not in repl:
+ # If the default day exceeds the last day of the month, fall back to
+ # the end of the month.
+ cyear = default.year if res.year is None else res.year
+ cmonth = default.month if res.month is None else res.month
+ cday = default.day if res.day is None else res.day
+
+ if cday > monthrange(cyear, cmonth)[1]:
+ repl['day'] = monthrange(cyear, cmonth)[1]
+
+ ret = default.replace(**repl)
+
+ if res.weekday is not None and not res.day:
+ ret = ret+relativedelta.relativedelta(weekday=res.weekday)
+
+ if not ignoretz:
+ if (isinstance(tzinfos, collections.Callable) or
+ tzinfos and res.tzname in tzinfos):
+
+ if isinstance(tzinfos, collections.Callable):
+ tzdata = tzinfos(res.tzname, res.tzoffset)
+ else:
+ tzdata = tzinfos.get(res.tzname)
+
+ if isinstance(tzdata, datetime.tzinfo):
+ tzinfo = tzdata
+ elif isinstance(tzdata, text_type):
+ tzinfo = tz.tzstr(tzdata)
+ elif isinstance(tzdata, integer_types):
+ tzinfo = tz.tzoffset(res.tzname, tzdata)
+ else:
+ raise ValueError("Offset must be tzinfo subclass, "
+ "tz string, or int offset.")
+ ret = ret.replace(tzinfo=tzinfo)
+ elif res.tzname and res.tzname in time.tzname:
+ ret = ret.replace(tzinfo=tz.tzlocal())
+ elif res.tzoffset == 0:
+ ret = ret.replace(tzinfo=tz.tzutc())
+ elif res.tzoffset:
+ ret = ret.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset))
+
+ if kwargs.get('fuzzy_with_tokens', False):
+ return ret, skipped_tokens
+ else:
+ return ret
+
+ class _result(_resultbase):
+ __slots__ = ["year", "month", "day", "weekday",
+ "hour", "minute", "second", "microsecond",
+ "tzname", "tzoffset", "ampm"]
+
+ def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False,
+ fuzzy_with_tokens=False):
+ """
+ Private method which performs the heavy lifting of parsing, called from
+ ``parse()``, which passes on its ``kwargs`` to this function.
+
+ :param timestr:
+ The string to parse.
+
+ :param dayfirst:
+ Whether to interpret the first value in an ambiguous 3-integer date
+ (e.g. 01/05/09) as the day (``True``) or month (``False``). If
+ ``yearfirst`` is set to ``True``, this distinguishes between YDM
+ and YMD. If set to ``None``, this value is retrieved from the
+ current :class:`parserinfo` object (which itself defaults to
+ ``False``).
+
+ :param yearfirst:
+ Whether to interpret the first value in an ambiguous 3-integer date
+ (e.g. 01/05/09) as the year. If ``True``, the first number is taken
+ to be the year, otherwise the last number is taken to be the year.
+ If this is set to ``None``, the value is retrieved from the current
+ :class:`parserinfo` object (which itself defaults to ``False``).
+
+ :param fuzzy:
+ Whether to allow fuzzy parsing, allowing for strings like "Today is
+ January 1, 2047 at 8:21:00AM".
+
+ :param fuzzy_with_tokens:
+ If ``True``, ``fuzzy`` is automatically set to True, and the parser
+ will return a tuple where the first element is the parsed
+ :class:`datetime.datetime` timestamp and the second element is
+ a tuple containing the portions of the string which were ignored:
+
+ .. doctest::
+
+ >>> from dateutil.parser import parse
+ >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
+ (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))
+
+ """
+ if fuzzy_with_tokens:
+ fuzzy = True
+
+ info = self.info
+
+ if dayfirst is None:
+ dayfirst = info.dayfirst
+
+ if yearfirst is None:
+ yearfirst = info.yearfirst
+
+ res = self._result()
+ l = _timelex.split(timestr) # Splits the timestr into tokens
+
+ # keep up with the last token skipped so we can recombine
+ # consecutively skipped tokens (-2 for when i begins at 0).
+ last_skipped_token_i = -2
+ skipped_tokens = list()
+
+ try:
+ # year/month/day list
+ ymd = _ymd(timestr)
+
+ # Index of the month string in ymd
+ mstridx = -1
+
+ len_l = len(l)
+ i = 0
+ while i < len_l:
+
+ # Check if it's a number
+ try:
+ value_repr = l[i]
+ value = float(value_repr)
+ except ValueError:
+ value = None
+
+ if value is not None:
+ # Token is a number
+ len_li = len(l[i])
+ i += 1
+
+ if (len(ymd) == 3 and len_li in (2, 4)
+ and res.hour is None and (i >= len_l or (l[i] != ':' and
+ info.hms(l[i]) is None))):
+ # 19990101T23[59]
+ s = l[i-1]
+ res.hour = int(s[:2])
+
+ if len_li == 4:
+ res.minute = int(s[2:])
+
+ elif len_li == 6 or (len_li > 6 and l[i-1].find('.') == 6):
+ # YYMMDD or HHMMSS[.ss]
+ s = l[i-1]
+
+ if not ymd and l[i-1].find('.') == -1:
+ #ymd.append(info.convertyear(int(s[:2])))
+
+ ymd.append(s[:2])
+ ymd.append(s[2:4])
+ ymd.append(s[4:])
+ else:
+ # 19990101T235959[.59]
+ res.hour = int(s[:2])
+ res.minute = int(s[2:4])
+ res.second, res.microsecond = _parsems(s[4:])
+
+ elif len_li in (8, 12, 14):
+ # YYYYMMDD
+ s = l[i-1]
+ ymd.append(s[:4])
+ ymd.append(s[4:6])
+ ymd.append(s[6:8])
+
+ if len_li > 8:
+ res.hour = int(s[8:10])
+ res.minute = int(s[10:12])
+
+ if len_li > 12:
+ res.second = int(s[12:])
+
+ elif ((i < len_l and info.hms(l[i]) is not None) or
+ (i+1 < len_l and l[i] == ' ' and
+ info.hms(l[i+1]) is not None)):
+
+ # HH[ ]h or MM[ ]m or SS[.ss][ ]s
+ if l[i] == ' ':
+ i += 1
+
+ idx = info.hms(l[i])
+
+ while True:
+ if idx == 0:
+ res.hour = int(value)
+
+ if value % 1:
+ res.minute = int(60*(value % 1))
+
+ elif idx == 1:
+ res.minute = int(value)
+
+ if value % 1:
+ res.second = int(60*(value % 1))
+
+ elif idx == 2:
+ res.second, res.microsecond = \
+ _parsems(value_repr)
+
+ i += 1
+
+ if i >= len_l or idx == 2:
+ break
+
+ # 12h00
+ try:
+ value_repr = l[i]
+ value = float(value_repr)
+ except ValueError:
+ break
+ else:
+ i += 1
+ idx += 1
+
+ if i < len_l:
+ newidx = info.hms(l[i])
+
+ if newidx is not None:
+ idx = newidx
+
+ elif (i == len_l and l[i-2] == ' ' and
+ info.hms(l[i-3]) is not None):
+ # X h MM or X m SS
+ idx = info.hms(l[i-3])
+
+ if idx == 0: # h
+ res.minute = int(value)
+
+ sec_remainder = value % 1
+ if sec_remainder:
+ res.second = int(60 * sec_remainder)
+ elif idx == 1: # m
+ res.second, res.microsecond = \
+ _parsems(value_repr)
+
+ # We don't need to advance the tokens here because the
+ # i == len_l call indicates that we're looking at all
+ # the tokens already.
+
+ elif i+1 < len_l and l[i] == ':':
+ # HH:MM[:SS[.ss]]
+ res.hour = int(value)
+ i += 1
+ value = float(l[i])
+ res.minute = int(value)
+
+ if value % 1:
+ res.second = int(60*(value % 1))
+
+ i += 1
+
+ if i < len_l and l[i] == ':':
+ res.second, res.microsecond = _parsems(l[i+1])
+ i += 2
+
+ elif i < len_l and l[i] in ('-', '/', '.'):
+ sep = l[i]
+ ymd.append(value_repr)
+ i += 1
+
+ if i < len_l and not info.jump(l[i]):
+ try:
+ # 01-01[-01]
+ ymd.append(l[i])
+ except ValueError:
+ # 01-Jan[-01]
+ value = info.month(l[i])
+
+ if value is not None:
+ ymd.append(value)
+ assert mstridx == -1
+ mstridx = len(ymd)-1
+ else:
+ return None, None
+
+ i += 1
+
+ if i < len_l and l[i] == sep:
+ # We have three members
+ i += 1
+ value = info.month(l[i])
+
+ if value is not None:
+ ymd.append(value)
+ assert mstridx == -1
+ mstridx = len(ymd)-1
+ else:
+ ymd.append(l[i])
+
+ i += 1
+ elif i >= len_l or info.jump(l[i]):
+ if i+1 < len_l and info.ampm(l[i+1]) is not None:
+ # 12 am
+ res.hour = int(value)
+
+ if res.hour < 12 and info.ampm(l[i+1]) == 1:
+ res.hour += 12
+ elif res.hour == 12 and info.ampm(l[i+1]) == 0:
+ res.hour = 0
+
+ i += 1
+ else:
+ # Year, month or day
+ ymd.append(value)
+ i += 1
+ elif info.ampm(l[i]) is not None:
+
+ # 12am
+ res.hour = int(value)
+
+ if res.hour < 12 and info.ampm(l[i]) == 1:
+ res.hour += 12
+ elif res.hour == 12 and info.ampm(l[i]) == 0:
+ res.hour = 0
+ i += 1
+
+ elif not fuzzy:
+ return None, None
+ else:
+ i += 1
+ continue
+
+ # Check weekday
+ value = info.weekday(l[i])
+ if value is not None:
+ res.weekday = value
+ i += 1
+ continue
+
+ # Check month name
+ value = info.month(l[i])
+ if value is not None:
+ ymd.append(value)
+ assert mstridx == -1
+ mstridx = len(ymd)-1
+
+ i += 1
+ if i < len_l:
+ if l[i] in ('-', '/'):
+ # Jan-01[-99]
+ sep = l[i]
+ i += 1
+ ymd.append(l[i])
+ i += 1
+
+ if i < len_l and l[i] == sep:
+ # Jan-01-99
+ i += 1
+ ymd.append(l[i])
+ i += 1
+
+ elif (i+3 < len_l and l[i] == l[i+2] == ' '
+ and info.pertain(l[i+1])):
+ # Jan of 01
+ # In this case, 01 is clearly year
+ try:
+ value = int(l[i+3])
+ except ValueError:
+ # Wrong guess
+ pass
+ else:
+ # Convert it here to become unambiguous
+ ymd.append(str(info.convertyear(value)))
+ i += 4
+ continue
+
+ # Check am/pm
+ value = info.ampm(l[i])
+ if value is not None:
+ # For fuzzy parsing, 'a' or 'am' (both valid English words)
+ # may erroneously trigger the AM/PM flag. Deal with that
+ # here.
+ val_is_ampm = True
+
+ # If there's already an AM/PM flag, this one isn't one.
+ if fuzzy and res.ampm is not None:
+ val_is_ampm = False
+
+ # If AM/PM is found and hour is not, raise a ValueError
+ if res.hour is None:
+ if fuzzy:
+ val_is_ampm = False
+ else:
+ raise ValueError('No hour specified with ' +
+ 'AM or PM flag.')
+ elif not 0 <= res.hour <= 12:
+ # If AM/PM is found, it's a 12 hour clock, so raise
+ # an error for invalid range
+ if fuzzy:
+ val_is_ampm = False
+ else:
+ raise ValueError('Invalid hour specified for ' +
+ '12-hour clock.')
+
+ if val_is_ampm:
+ if value == 1 and res.hour < 12:
+ res.hour += 12
+ elif value == 0 and res.hour == 12:
+ res.hour = 0
+
+ res.ampm = value
+
+ elif fuzzy:
+ last_skipped_token_i = self._skip_token(skipped_tokens,
+ last_skipped_token_i, i, l)
+ i += 1
+ continue
+
+ # Check for a timezone name
+ if (res.hour is not None and len(l[i]) <= 5 and
+ res.tzname is None and res.tzoffset is None and
+ not [x for x in l[i] if x not in
+ string.ascii_uppercase]):
+ res.tzname = l[i]
+ res.tzoffset = info.tzoffset(res.tzname)
+ i += 1
+
+ # Check for something like GMT+3, or BRST+3. Notice
+ # that it doesn't mean "I am 3 hours after GMT", but
+ # "my time +3 is GMT". If found, we reverse the
+ # logic so that timezone parsing code will get it
+ # right.
+ if i < len_l and l[i] in ('+', '-'):
+ l[i] = ('+', '-')[l[i] == '+']
+ res.tzoffset = None
+ if info.utczone(res.tzname):
+ # With something like GMT+3, the timezone
+ # is *not* GMT.
+ res.tzname = None
+
+ continue
+
+ # Check for a numbered timezone
+ if res.hour is not None and l[i] in ('+', '-'):
+ signal = (-1, 1)[l[i] == '+']
+ i += 1
+ len_li = len(l[i])
+
+ if len_li == 4:
+ # -0300
+ res.tzoffset = int(l[i][:2])*3600+int(l[i][2:])*60
+ elif i+1 < len_l and l[i+1] == ':':
+ # -03:00
+ res.tzoffset = int(l[i])*3600+int(l[i+2])*60
+ i += 2
+ elif len_li <= 2:
+ # -[0]3
+ res.tzoffset = int(l[i][:2])*3600
+ else:
+ return None, None
+ i += 1
+
+ res.tzoffset *= signal
+
+ # Look for a timezone name between parenthesis
+ if (i+3 < len_l and
+ info.jump(l[i]) and l[i+1] == '(' and l[i+3] == ')' and
+ 3 <= len(l[i+2]) <= 5 and
+ not [x for x in l[i+2]
+ if x not in string.ascii_uppercase]):
+ # -0300 (BRST)
+ res.tzname = l[i+2]
+ i += 4
+ continue
+
+ # Check jumps
+ if not (info.jump(l[i]) or fuzzy):
+ return None, None
+
+ last_skipped_token_i = self._skip_token(skipped_tokens,
+ last_skipped_token_i, i, l)
+ i += 1
+
+ # Process year/month/day
+ year, month, day = ymd.resolve_ymd(mstridx, yearfirst, dayfirst)
+ if year is not None:
+ res.year = year
+ res.century_specified = ymd.century_specified
+
+ if month is not None:
+ res.month = month
+
+ if day is not None:
+ res.day = day
+
+ except (IndexError, ValueError, AssertionError):
+ return None, None
+
+ if not info.validate(res):
+ return None, None
+
+ if fuzzy_with_tokens:
+ return res, tuple(skipped_tokens)
+ else:
+ return res, None
+
+ @staticmethod
+ def _skip_token(skipped_tokens, last_skipped_token_i, i, l):
+ if last_skipped_token_i == i - 1:
+ # recombine the tokens
+ skipped_tokens[-1] += l[i]
+ else:
+ # just append
+ skipped_tokens.append(l[i])
+ last_skipped_token_i = i
+ return last_skipped_token_i
+
+
+DEFAULTPARSER = parser()
+
+
+def parse(timestr, parserinfo=None, **kwargs):
+ """
+
+ Parse a string in one of the supported formats, using the
+ ``parserinfo`` parameters.
+
+ :param timestr:
+ A string containing a date/time stamp.
+
+ :param parserinfo:
+ A :class:`parserinfo` object containing parameters for the parser.
+ If ``None``, the default arguments to the :class:`parserinfo`
+ constructor are used.
+
+ The ``**kwargs`` parameter takes the following keyword arguments:
+
+ :param default:
+ The default datetime object; if this is a datetime object and not
+ ``None``, elements specified in ``timestr`` replace the
+ corresponding elements in the default object.
+
+ :param ignoretz:
+ If set ``True``, time zones in parsed strings are ignored and a naive
+ :class:`datetime` object is returned.
+
+ :param tzinfos:
+ Additional time zone names / aliases which may be present in the
+ string. This argument maps time zone names (and optionally offsets
+ from those time zones) to time zones. This parameter can be a
+ dictionary with timezone aliases mapping time zone names to time
+ zones or a function taking two parameters (``tzname`` and
+ ``tzoffset``) and returning a time zone.
+
+ The timezones to which the names are mapped can be an integer
+ offset from UTC in seconds or a :class:`tzinfo` object.
+
+ .. doctest::
+ :options: +NORMALIZE_WHITESPACE
+
+ >>> from dateutil.parser import parse
+ >>> from dateutil.tz import gettz
+ >>> tzinfos = {"BRST": -10800, "CST": gettz("America/Chicago")}
+ >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
+ datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -10800))
+ >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos)
+ datetime.datetime(2012, 1, 19, 17, 21,
+ tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago'))
+
+ This parameter is ignored if ``ignoretz`` is set.
+
+ :param dayfirst:
+ Whether to interpret the first value in an ambiguous 3-integer date
+ (e.g. 01/05/09) as the day (``True``) or month (``False``). If
+ ``yearfirst`` is set to ``True``, this distinguishes between YDM and
+ YMD. If set to ``None``, this value is retrieved from the current
+ :class:`parserinfo` object (which itself defaults to ``False``).
+
+ :param yearfirst:
+ Whether to interpret the first value in an ambiguous 3-integer date
+ (e.g. 01/05/09) as the year. If ``True``, the first number is taken to
+ be the year, otherwise the last number is taken to be the year. If
+ this is set to ``None``, the value is retrieved from the current
+ :class:`parserinfo` object (which itself defaults to ``False``).
+
+ :param fuzzy:
+ Whether to allow fuzzy parsing, allowing for strings like "Today is
+ January 1, 2047 at 8:21:00AM".
+
+ :param fuzzy_with_tokens:
+ If ``True``, ``fuzzy`` is automatically set to True, and the parser
+ will return a tuple where the first element is the parsed
+ :class:`datetime.datetime` timestamp and the second element is
+ a tuple containing the portions of the string which were ignored:
+
+ .. doctest::
+
+ >>> from dateutil.parser import parse
+ >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
+ (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))
+
+ :return:
+ Returns a :class:`datetime.datetime` object or, if the
+ ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the
+ first element being a :class:`datetime.datetime` object, the second
+ a tuple containing the fuzzy tokens.
+
+ :raises ValueError:
+ Raised for invalid or unknown string format, if the provided
+ :class:`tzinfo` is not in a valid format, or if an invalid date
+ would be created.
+
+ :raises OverflowError:
+ Raised if the parsed date exceeds the largest valid C integer on
+ your system.
+ """
+ if parserinfo:
+ return parser(parserinfo).parse(timestr, **kwargs)
+ else:
+ return DEFAULTPARSER.parse(timestr, **kwargs)
+
+
+class _tzparser(object):
+
+ class _result(_resultbase):
+
+ __slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset",
+ "start", "end"]
+
+ class _attr(_resultbase):
+ __slots__ = ["month", "week", "weekday",
+ "yday", "jyday", "day", "time"]
+
+ def __repr__(self):
+ return self._repr("")
+
+ def __init__(self):
+ _resultbase.__init__(self)
+ self.start = self._attr()
+ self.end = self._attr()
+
+ def parse(self, tzstr):
+ res = self._result()
+ l = _timelex.split(tzstr)
+ try:
+
+ len_l = len(l)
+
+ i = 0
+ while i < len_l:
+ # BRST+3[BRDT[+2]]
+ j = i
+ while j < len_l and not [x for x in l[j]
+ if x in "0123456789:,-+"]:
+ j += 1
+ if j != i:
+ if not res.stdabbr:
+ offattr = "stdoffset"
+ res.stdabbr = "".join(l[i:j])
+ else:
+ offattr = "dstoffset"
+ res.dstabbr = "".join(l[i:j])
+ i = j
+ if (i < len_l and (l[i] in ('+', '-') or l[i][0] in
+ "0123456789")):
+ if l[i] in ('+', '-'):
+ # Yes, that's right. See the TZ variable
+ # documentation.
+ signal = (1, -1)[l[i] == '+']
+ i += 1
+ else:
+ signal = -1
+ len_li = len(l[i])
+ if len_li == 4:
+ # -0300
+ setattr(res, offattr, (int(l[i][:2])*3600 +
+ int(l[i][2:])*60)*signal)
+ elif i+1 < len_l and l[i+1] == ':':
+ # -03:00
+ setattr(res, offattr,
+ (int(l[i])*3600+int(l[i+2])*60)*signal)
+ i += 2
+ elif len_li <= 2:
+ # -[0]3
+ setattr(res, offattr,
+ int(l[i][:2])*3600*signal)
+ else:
+ return None
+ i += 1
+ if res.dstabbr:
+ break
+ else:
+ break
+
+ if i < len_l:
+ for j in range(i, len_l):
+ if l[j] == ';':
+ l[j] = ','
+
+ assert l[i] == ','
+
+ i += 1
+
+ if i >= len_l:
+ pass
+ elif (8 <= l.count(',') <= 9 and
+ not [y for x in l[i:] if x != ','
+ for y in x if y not in "0123456789"]):
+ # GMT0BST,3,0,30,3600,10,0,26,7200[,3600]
+ for x in (res.start, res.end):
+ x.month = int(l[i])
+ i += 2
+ if l[i] == '-':
+ value = int(l[i+1])*-1
+ i += 1
+ else:
+ value = int(l[i])
+ i += 2
+ if value:
+ x.week = value
+ x.weekday = (int(l[i])-1) % 7
+ else:
+ x.day = int(l[i])
+ i += 2
+ x.time = int(l[i])
+ i += 2
+ if i < len_l:
+ if l[i] in ('-', '+'):
+ signal = (-1, 1)[l[i] == "+"]
+ i += 1
+ else:
+ signal = 1
+ res.dstoffset = (res.stdoffset+int(l[i]))*signal
+ elif (l.count(',') == 2 and l[i:].count('/') <= 2 and
+ not [y for x in l[i:] if x not in (',', '/', 'J', 'M',
+ '.', '-', ':')
+ for y in x if y not in "0123456789"]):
+ for x in (res.start, res.end):
+ if l[i] == 'J':
+ # non-leap year day (1 based)
+ i += 1
+ x.jyday = int(l[i])
+ elif l[i] == 'M':
+ # month[-.]week[-.]weekday
+ i += 1
+ x.month = int(l[i])
+ i += 1
+ assert l[i] in ('-', '.')
+ i += 1
+ x.week = int(l[i])
+ if x.week == 5:
+ x.week = -1
+ i += 1
+ assert l[i] in ('-', '.')
+ i += 1
+ x.weekday = (int(l[i])-1) % 7
+ else:
+ # year day (zero based)
+ x.yday = int(l[i])+1
+
+ i += 1
+
+ if i < len_l and l[i] == '/':
+ i += 1
+ # start time
+ len_li = len(l[i])
+ if len_li == 4:
+ # -0300
+ x.time = (int(l[i][:2])*3600+int(l[i][2:])*60)
+ elif i+1 < len_l and l[i+1] == ':':
+ # -03:00
+ x.time = int(l[i])*3600+int(l[i+2])*60
+ i += 2
+ if i+1 < len_l and l[i+1] == ':':
+ i += 2
+ x.time += int(l[i])
+ elif len_li <= 2:
+ # -[0]3
+ x.time = (int(l[i][:2])*3600)
+ else:
+ return None
+ i += 1
+
+ assert i == len_l or l[i] == ','
+
+ i += 1
+
+ assert i >= len_l
+
+ except (IndexError, ValueError, AssertionError):
+ return None
+
+ return res
+
+
+DEFAULTTZPARSER = _tzparser()
+
+
+def _parsetz(tzstr):
+ return DEFAULTTZPARSER.parse(tzstr)
+
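+# Illustrative sketch (not part of the upstream module): POSIX-style TZ
+# strings are parsed with the usual inverted sign convention, so a call such
+# as _parsetz("EST5EDT") is expected to yield a result whose stdabbr is
+# 'EST', whose stdoffset is -18000 seconds (UTC-5) and whose dstabbr is
+# 'EDT', with the DST offset left unset.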
+
+def _parsems(value):
+ """Parse a I[.F] seconds value into (seconds, microseconds)."""
+ if "." not in value:
+ return int(value), 0
+ else:
+ i, f = value.split(".")
+ return int(i), int(f.ljust(6, "0")[:6])
+
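+# Illustrative sketch (not part of the upstream module): the fractional part
+# is padded/truncated to microsecond precision, e.g.
+#
+#     _parsems("21")          # -> (21, 0)
+#     _parsems("21.447")      # -> (21, 447000)
+#     _parsems("21.1234567")  # -> (21, 123456)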
+
+# vim:ts=4:sw=4:et
diff --git a/appWSM/lib/python2.7/site-packages/dateutil/relativedelta.py b/appWSM/lib/python2.7/site-packages/dateutil/relativedelta.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e66afc4a64d404873c7c16b83e0994413b6d575
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/dateutil/relativedelta.py
@@ -0,0 +1,549 @@
+# -*- coding: utf-8 -*-
+import datetime
+import calendar
+
+import operator
+from math import copysign
+
+from six import integer_types
+from warnings import warn
+
+from ._common import weekday
+
+MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7))
+
+__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
+
+
+class relativedelta(object):
+ """
+ The relativedelta type is based on the specification of the excellent
+ work done by M.-A. Lemburg in his
+ `mx.DateTime `_ extension.
+ However, notice that this type does *NOT* implement the same algorithm as
+ his work. Do *NOT* expect it to behave like mx.DateTime's counterpart.
+
+ There are two different ways to build a relativedelta instance. The
+ first one is passing it two date/datetime classes::
+
+ relativedelta(datetime1, datetime2)
+
+ The second one is passing it any number of the following keyword arguments::
+
+ relativedelta(arg1=x,arg2=y,arg3=z...)
+
+ year, month, day, hour, minute, second, microsecond:
+ Absolute information (argument is singular); adding or subtracting a
+ relativedelta with absolute information does not perform an arithmetic
+ operation, but rather REPLACES the corresponding value in the
+ original datetime with the value(s) in relativedelta.
+
+ years, months, weeks, days, hours, minutes, seconds, microseconds:
+ Relative information, may be negative (argument is plural); adding
+ or subtracting a relativedelta with relative information performs
+ the corresponding arithmetic operation on the original datetime value
+ with the information in the relativedelta.
+
+ weekday:
+ One of the weekday instances (MO, TU, etc). These instances may
+ receive a parameter N, specifying the Nth weekday, which could
+ be positive or negative (like MO(+1) or MO(-2)). Not specifying
+ it is the same as specifying +1. You can also use an integer,
+ where 0=MO.
+
+ leapdays:
+ Will add the given days to the date found, if the year is a leap
+ year and the date found is after February 28.
+
+ yearday, nlyearday:
+ Set the yearday or the non-leap year day (jump leap days).
+ These are converted to day/month/leapdays information.
+
+ Here is the behavior of operations with relativedelta:
+
+ 1. Calculate the absolute year, using the 'year' argument, or the
+ original datetime year, if the argument is not present.
+
+ 2. Add the relative 'years' argument to the absolute year.
+
+ 3. Do steps 1 and 2 for month/months.
+
+ 4. Calculate the absolute day, using the 'day' argument, or the
+ original datetime day, if the argument is not present. Then,
+ subtract from the day until it fits in the year and month
+ found after their operations.
+
+ 5. Add the relative 'days' argument to the absolute day. Notice
+ that the 'weeks' argument is multiplied by 7 and added to
+ 'days'.
+
+ 6. Do steps 1 and 2 for hour/hours, minute/minutes, second/seconds,
+ microsecond/microseconds.
+
+ 7. If the 'weekday' argument is present, calculate the weekday,
+ with the given (wday, nth) tuple. wday is the index of the
+ weekday (0-6, 0=Mon), and nth is the number of weeks to add
+ forward or backward, depending on its sign. Notice that if
+ the calculated date is already Monday, for example, using
+ (0, 1) or (0, -1) won't change the day.
+ """
+
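+ # Illustrative sketch (not part of the upstream docstring): absolute and
+ # relative pieces combine as described above, and a day that would overflow
+ # the target month is clamped to that month's last day, e.g.
+ #
+ #     datetime.datetime(2003, 1, 31) + relativedelta(months=+1)
+ #     # -> datetime.datetime(2003, 2, 28, 0, 0)
+ #     datetime.datetime(2003, 2, 28) + relativedelta(day=31)
+ #     # -> datetime.datetime(2003, 2, 28, 0, 0)   (absolute day clamped)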
+ def __init__(self, dt1=None, dt2=None,
+ years=0, months=0, days=0, leapdays=0, weeks=0,
+ hours=0, minutes=0, seconds=0, microseconds=0,
+ year=None, month=None, day=None, weekday=None,
+ yearday=None, nlyearday=None,
+ hour=None, minute=None, second=None, microsecond=None):
+
+ # Check for non-integer values in integer-only quantities
+ if any(x is not None and x != int(x) for x in (years, months)):
+ raise ValueError("Non-integer years and months are "
+ "ambiguous and not currently supported.")
+
+ if dt1 and dt2:
+ # datetime is a subclass of date. So both must be date
+ if not (isinstance(dt1, datetime.date) and
+ isinstance(dt2, datetime.date)):
+ raise TypeError("relativedelta only diffs datetime/date")
+
+ # We allow two dates, or two datetimes, so we coerce them to be
+ # of the same type
+ if (isinstance(dt1, datetime.datetime) !=
+ isinstance(dt2, datetime.datetime)):
+ if not isinstance(dt1, datetime.datetime):
+ dt1 = datetime.datetime.fromordinal(dt1.toordinal())
+ elif not isinstance(dt2, datetime.datetime):
+ dt2 = datetime.datetime.fromordinal(dt2.toordinal())
+
+ self.years = 0
+ self.months = 0
+ self.days = 0
+ self.leapdays = 0
+ self.hours = 0
+ self.minutes = 0
+ self.seconds = 0
+ self.microseconds = 0
+ self.year = None
+ self.month = None
+ self.day = None
+ self.weekday = None
+ self.hour = None
+ self.minute = None
+ self.second = None
+ self.microsecond = None
+ self._has_time = 0
+
+ # Get year / month delta between the two
+ months = (dt1.year - dt2.year) * 12 + (dt1.month - dt2.month)
+ self._set_months(months)
+
+ # Remove the year/month delta so the timedelta is just well-defined
+ # time units (seconds, days and microseconds)
+ dtm = self.__radd__(dt2)
+
+ # If we've overshot our target, make an adjustment
+ if dt1 < dt2:
+ compare = operator.gt
+ increment = 1
+ else:
+ compare = operator.lt
+ increment = -1
+
+ while compare(dt1, dtm):
+ months += increment
+ self._set_months(months)
+ dtm = self.__radd__(dt2)
+
+ # Get the timedelta between the "months-adjusted" date and dt1
+ delta = dt1 - dtm
+ self.seconds = delta.seconds + delta.days * 86400
+ self.microseconds = delta.microseconds
+ else:
+ # Relative information
+ self.years = years
+ self.months = months
+ self.days = days + weeks * 7
+ self.leapdays = leapdays
+ self.hours = hours
+ self.minutes = minutes
+ self.seconds = seconds
+ self.microseconds = microseconds
+
+ # Absolute information
+ self.year = year
+ self.month = month
+ self.day = day
+ self.hour = hour
+ self.minute = minute
+ self.second = second
+ self.microsecond = microsecond
+
+ if any(x is not None and int(x) != x
+ for x in (year, month, day, hour,
+ minute, second, microsecond)):
+ # For now we'll deprecate floats - later it'll be an error.
+ warn("Non-integer value passed as absolute information. " +
+ "This is not a well-defined condition and will raise " +
+ "errors in future versions.", DeprecationWarning)
+
+ if isinstance(weekday, integer_types):
+ self.weekday = weekdays[weekday]
+ else:
+ self.weekday = weekday
+
+ yday = 0
+ if nlyearday:
+ yday = nlyearday
+ elif yearday:
+ yday = yearday
+ if yearday > 59:
+ self.leapdays = -1
+ if yday:
+ ydayidx = [31, 59, 90, 120, 151, 181, 212,
+ 243, 273, 304, 334, 366]
+ for idx, ydays in enumerate(ydayidx):
+ if yday <= ydays:
+ self.month = idx+1
+ if idx == 0:
+ self.day = yday
+ else:
+ self.day = yday-ydayidx[idx-1]
+ break
+ else:
+ raise ValueError("invalid year day (%d)" % yday)
+
+ self._fix()
+
+ def _fix(self):
+ if abs(self.microseconds) > 999999:
+ s = _sign(self.microseconds)
+ div, mod = divmod(self.microseconds * s, 1000000)
+ self.microseconds = mod * s
+ self.seconds += div * s
+ if abs(self.seconds) > 59:
+ s = _sign(self.seconds)
+ div, mod = divmod(self.seconds * s, 60)
+ self.seconds = mod * s
+ self.minutes += div * s
+ if abs(self.minutes) > 59:
+ s = _sign(self.minutes)
+ div, mod = divmod(self.minutes * s, 60)
+ self.minutes = mod * s
+ self.hours += div * s
+ if abs(self.hours) > 23:
+ s = _sign(self.hours)
+ div, mod = divmod(self.hours * s, 24)
+ self.hours = mod * s
+ self.days += div * s
+ if abs(self.months) > 11:
+ s = _sign(self.months)
+ div, mod = divmod(self.months * s, 12)
+ self.months = mod * s
+ self.years += div * s
+ if (self.hours or self.minutes or self.seconds or self.microseconds
+ or self.hour is not None or self.minute is not None or
+ self.second is not None or self.microsecond is not None):
+ self._has_time = 1
+ else:
+ self._has_time = 0
+
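+ # Illustrative sketch (not part of the upstream module): _fix() is what
+ # makes oversized relative fields carry into the next larger unit, e.g.
+ #
+ #     relativedelta(hours=25)   # -> relativedelta(days=+1, hours=+1)
+ #     relativedelta(months=14)  # -> relativedelta(years=+1, months=+2)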
+ @property
+ def weeks(self):
+ return self.days // 7
+
+ @weeks.setter
+ def weeks(self, value):
+ self.days = self.days - (self.weeks * 7) + value * 7
+
+ def _set_months(self, months):
+ self.months = months
+ if abs(self.months) > 11:
+ s = _sign(self.months)
+ div, mod = divmod(self.months * s, 12)
+ self.months = mod * s
+ self.years = div * s
+ else:
+ self.years = 0
+
+ def normalized(self):
+ """
+ Return a version of this object represented entirely using integer
+ values for the relative attributes.
+
+ >>> relativedelta(days=1.5, hours=2).normalized()
+ relativedelta(days=1, hours=14)
+
+ :return:
+ Returns a :class:`dateutil.relativedelta.relativedelta` object.
+ """
+ # Cascade remainders down (rounding each to roughly nearest microsecond)
+ days = int(self.days)
+
+ hours_f = round(self.hours + 24 * (self.days - days), 11)
+ hours = int(hours_f)
+
+ minutes_f = round(self.minutes + 60 * (hours_f - hours), 10)
+ minutes = int(minutes_f)
+
+ seconds_f = round(self.seconds + 60 * (minutes_f - minutes), 8)
+ seconds = int(seconds_f)
+
+ microseconds = round(self.microseconds + 1e6 * (seconds_f - seconds))
+
+ # Constructor carries overflow back up with call to _fix()
+ return self.__class__(years=self.years, months=self.months,
+ days=days, hours=hours, minutes=minutes,
+ seconds=seconds, microseconds=microseconds,
+ leapdays=self.leapdays, year=self.year,
+ month=self.month, day=self.day,
+ weekday=self.weekday, hour=self.hour,
+ minute=self.minute, second=self.second,
+ microsecond=self.microsecond)
+
+ def __add__(self, other):
+ if isinstance(other, relativedelta):
+ return self.__class__(years=other.years + self.years,
+ months=other.months + self.months,
+ days=other.days + self.days,
+ hours=other.hours + self.hours,
+ minutes=other.minutes + self.minutes,
+ seconds=other.seconds + self.seconds,
+ microseconds=(other.microseconds +
+ self.microseconds),
+ leapdays=other.leapdays or self.leapdays,
+ year=(other.year if other.year is not None
+ else self.year),
+ month=(other.month if other.month is not None
+ else self.month),
+ day=(other.day if other.day is not None
+ else self.day),
+ weekday=(other.weekday if other.weekday is not None
+ else self.weekday),
+ hour=(other.hour if other.hour is not None
+ else self.hour),
+ minute=(other.minute if other.minute is not None
+ else self.minute),
+ second=(other.second if other.second is not None
+ else self.second),
+ microsecond=(other.microsecond if other.microsecond
+ is not None else
+ self.microsecond))
+ if isinstance(other, datetime.timedelta):
+ return self.__class__(years=self.years,
+ months=self.months,
+ days=self.days + other.days,
+ hours=self.hours,
+ minutes=self.minutes,
+ seconds=self.seconds + other.seconds,
+ microseconds=self.microseconds + other.microseconds,
+ leapdays=self.leapdays,
+ year=self.year,
+ month=self.month,
+ day=self.day,
+ weekday=self.weekday,
+ hour=self.hour,
+ minute=self.minute,
+ second=self.second,
+ microsecond=self.microsecond)
+ if not isinstance(other, datetime.date):
+ return NotImplemented
+ elif self._has_time and not isinstance(other, datetime.datetime):
+ other = datetime.datetime.fromordinal(other.toordinal())
+ year = (self.year or other.year)+self.years
+ month = self.month or other.month
+ if self.months:
+ assert 1 <= abs(self.months) <= 12
+ month += self.months
+ if month > 12:
+ year += 1
+ month -= 12
+ elif month < 1:
+ year -= 1
+ month += 12
+ day = min(calendar.monthrange(year, month)[1],
+ self.day or other.day)
+ repl = {"year": year, "month": month, "day": day}
+ for attr in ["hour", "minute", "second", "microsecond"]:
+ value = getattr(self, attr)
+ if value is not None:
+ repl[attr] = value
+ days = self.days
+ if self.leapdays and month > 2 and calendar.isleap(year):
+ days += self.leapdays
+ ret = (other.replace(**repl)
+ + datetime.timedelta(days=days,
+ hours=self.hours,
+ minutes=self.minutes,
+ seconds=self.seconds,
+ microseconds=self.microseconds))
+ if self.weekday:
+ weekday, nth = self.weekday.weekday, self.weekday.n or 1
+ jumpdays = (abs(nth) - 1) * 7
+ if nth > 0:
+ jumpdays += (7 - ret.weekday() + weekday) % 7
+ else:
+ jumpdays += (ret.weekday() - weekday) % 7
+ jumpdays *= -1
+ ret += datetime.timedelta(days=jumpdays)
+ return ret
+
+ def __radd__(self, other):
+ return self.__add__(other)
+
+ def __rsub__(self, other):
+ return self.__neg__().__radd__(other)
+
+ def __sub__(self, other):
+ if not isinstance(other, relativedelta):
+ return NotImplemented # In case the other object defines __rsub__
+ return self.__class__(years=self.years - other.years,
+ months=self.months - other.months,
+ days=self.days - other.days,
+ hours=self.hours - other.hours,
+ minutes=self.minutes - other.minutes,
+ seconds=self.seconds - other.seconds,
+ microseconds=self.microseconds - other.microseconds,
+ leapdays=self.leapdays or other.leapdays,
+ year=(self.year if self.year is not None
+ else other.year),
+ month=(self.month if self.month is not None else
+ other.month),
+ day=(self.day if self.day is not None else
+ other.day),
+ weekday=(self.weekday if self.weekday is not None else
+ other.weekday),
+ hour=(self.hour if self.hour is not None else
+ other.hour),
+ minute=(self.minute if self.minute is not None else
+ other.minute),
+ second=(self.second if self.second is not None else
+ other.second),
+ microsecond=(self.microsecond if self.microsecond
+ is not None else
+ other.microsecond))
+
+ def __neg__(self):
+ return self.__class__(years=-self.years,
+ months=-self.months,
+ days=-self.days,
+ hours=-self.hours,
+ minutes=-self.minutes,
+ seconds=-self.seconds,
+ microseconds=-self.microseconds,
+ leapdays=self.leapdays,
+ year=self.year,
+ month=self.month,
+ day=self.day,
+ weekday=self.weekday,
+ hour=self.hour,
+ minute=self.minute,
+ second=self.second,
+ microsecond=self.microsecond)
+
+ def __bool__(self):
+ return not (not self.years and
+ not self.months and
+ not self.days and
+ not self.hours and
+ not self.minutes and
+ not self.seconds and
+ not self.microseconds and
+ not self.leapdays and
+ self.year is None and
+ self.month is None and
+ self.day is None and
+ self.weekday is None and
+ self.hour is None and
+ self.minute is None and
+ self.second is None and
+ self.microsecond is None)
+ # Compatibility with Python 2.x
+ __nonzero__ = __bool__
+
+ def __mul__(self, other):
+ try:
+ f = float(other)
+ except TypeError:
+ return NotImplemented
+
+ return self.__class__(years=int(self.years * f),
+ months=int(self.months * f),
+ days=int(self.days * f),
+ hours=int(self.hours * f),
+ minutes=int(self.minutes * f),
+ seconds=int(self.seconds * f),
+ microseconds=int(self.microseconds * f),
+ leapdays=self.leapdays,
+ year=self.year,
+ month=self.month,
+ day=self.day,
+ weekday=self.weekday,
+ hour=self.hour,
+ minute=self.minute,
+ second=self.second,
+ microsecond=self.microsecond)
+
+ __rmul__ = __mul__
+
+ def __eq__(self, other):
+ if not isinstance(other, relativedelta):
+ return NotImplemented
+ if self.weekday or other.weekday:
+ if not self.weekday or not other.weekday:
+ return False
+ if self.weekday.weekday != other.weekday.weekday:
+ return False
+ n1, n2 = self.weekday.n, other.weekday.n
+ if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)):
+ return False
+ return (self.years == other.years and
+ self.months == other.months and
+ self.days == other.days and
+ self.hours == other.hours and
+ self.minutes == other.minutes and
+ self.seconds == other.seconds and
+ self.microseconds == other.microseconds and
+ self.leapdays == other.leapdays and
+ self.year == other.year and
+ self.month == other.month and
+ self.day == other.day and
+ self.hour == other.hour and
+ self.minute == other.minute and
+ self.second == other.second and
+ self.microsecond == other.microsecond)
+
+ __hash__ = None
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __div__(self, other):
+ try:
+ reciprocal = 1 / float(other)
+ except TypeError:
+ return NotImplemented
+
+ return self.__mul__(reciprocal)
+
+ __truediv__ = __div__
+
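+ # Illustrative sketch (not part of the upstream module): multiplication and
+ # division scale only the relative fields, truncating each toward zero, e.g.
+ #
+ #     relativedelta(days=10, hours=4) / 4
+ #     # -> relativedelta(days=+2, hours=+1)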
+ def __repr__(self):
+ l = []
+ for attr in ["years", "months", "days", "leapdays",
+ "hours", "minutes", "seconds", "microseconds"]:
+ value = getattr(self, attr)
+ if value:
+ l.append("{attr}={value:+g}".format(attr=attr, value=value))
+ for attr in ["year", "month", "day", "weekday",
+ "hour", "minute", "second", "microsecond"]:
+ value = getattr(self, attr)
+ if value is not None:
+ l.append("{attr}={value}".format(attr=attr, value=repr(value)))
+ return "{classname}({attrs})".format(classname=self.__class__.__name__,
+ attrs=", ".join(l))
+
+
+def _sign(x):
+ return int(copysign(1, x))
+
+# vim:ts=4:sw=4:et
diff --git a/appWSM/lib/python2.7/site-packages/dateutil/rrule.py b/appWSM/lib/python2.7/site-packages/dateutil/rrule.py
new file mode 100644
index 0000000000000000000000000000000000000000..429f8fc8b3a16d7ada88f0f61a827eb1c8090df3
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/dateutil/rrule.py
@@ -0,0 +1,1610 @@
+# -*- coding: utf-8 -*-
+"""
+The rrule module offers a small, complete, and very fast, implementation of
+the recurrence rules documented in the
+`iCalendar RFC `_,
+including support for caching of results.
+"""
+import itertools
+import datetime
+import calendar
+import sys
+
+try:
+ from math import gcd
+except ImportError:
+ from fractions import gcd
+
+from six import advance_iterator, integer_types
+from six.moves import _thread, range
+import heapq
+
+from ._common import weekday as weekdaybase
+
+# For warning about deprecation of until and count
+from warnings import warn
+
+__all__ = ["rrule", "rruleset", "rrulestr",
+ "YEARLY", "MONTHLY", "WEEKLY", "DAILY",
+ "HOURLY", "MINUTELY", "SECONDLY",
+ "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
+
+# Every mask is 7 days longer to handle cross-year weekly periods.
+M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30 +
+ [7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7)
+M365MASK = list(M366MASK)
+M29, M30, M31 = list(range(1, 30)), list(range(1, 31)), list(range(1, 32))
+MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
+MDAY365MASK = list(MDAY366MASK)
+M29, M30, M31 = list(range(-29, 0)), list(range(-30, 0)), list(range(-31, 0))
+NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
+NMDAY365MASK = list(NMDAY366MASK)
+M366RANGE = (0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366)
+M365RANGE = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365)
+WDAYMASK = [0, 1, 2, 3, 4, 5, 6]*55
+del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31]
+MDAY365MASK = tuple(MDAY365MASK)
+M365MASK = tuple(M365MASK)
+
+FREQNAMES = ['YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY', 'HOURLY', 'MINUTELY', 'SECONDLY']
+
+(YEARLY,
+ MONTHLY,
+ WEEKLY,
+ DAILY,
+ HOURLY,
+ MINUTELY,
+ SECONDLY) = list(range(7))
+
+# Imported on demand.
+easter = None
+parser = None
+
+
+class weekday(weekdaybase):
+ """
+ This version of weekday does not allow n = 0.
+ """
+ def __init__(self, wkday, n=None):
+ if n == 0:
+ raise ValueError("Can't create weekday with n==0")
+
+ super(weekday, self).__init__(wkday, n)
+
+
+MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7))
+
+
+def _invalidates_cache(f):
+ """
+ Decorator for rruleset methods which may invalidate the
+ cached length.
+ """
+ def inner_func(self, *args, **kwargs):
+ rv = f(self, *args, **kwargs)
+ self._invalidate_cache()
+ return rv
+
+ return inner_func
+
+
+class rrulebase(object):
+ def __init__(self, cache=False):
+ if cache:
+ self._cache = []
+ self._cache_lock = _thread.allocate_lock()
+ self._invalidate_cache()
+ else:
+ self._cache = None
+ self._cache_complete = False
+ self._len = None
+
+ def __iter__(self):
+ if self._cache_complete:
+ return iter(self._cache)
+ elif self._cache is None:
+ return self._iter()
+ else:
+ return self._iter_cached()
+
+ def _invalidate_cache(self):
+ if self._cache is not None:
+ self._cache = []
+ self._cache_complete = False
+ self._cache_gen = self._iter()
+
+ if self._cache_lock.locked():
+ self._cache_lock.release()
+
+ self._len = None
+
+ def _iter_cached(self):
+ i = 0
+ gen = self._cache_gen
+ cache = self._cache
+ acquire = self._cache_lock.acquire
+ release = self._cache_lock.release
+ while gen:
+ if i == len(cache):
+ acquire()
+ if self._cache_complete:
+ break
+ try:
+ for j in range(10):
+ cache.append(advance_iterator(gen))
+ except StopIteration:
+ self._cache_gen = gen = None
+ self._cache_complete = True
+ break
+ release()
+ yield cache[i]
+ i += 1
+ while i < self._len:
+ yield cache[i]
+ i += 1
+
+ def __getitem__(self, item):
+ if self._cache_complete:
+ return self._cache[item]
+ elif isinstance(item, slice):
+ if item.step and item.step < 0:
+ return list(iter(self))[item]
+ else:
+ return list(itertools.islice(self,
+ item.start or 0,
+ item.stop or sys.maxsize,
+ item.step or 1))
+ elif item >= 0:
+ gen = iter(self)
+ try:
+ for i in range(item+1):
+ res = advance_iterator(gen)
+ except StopIteration:
+ raise IndexError
+ return res
+ else:
+ return list(iter(self))[item]
+
+ def __contains__(self, item):
+ if self._cache_complete:
+ return item in self._cache
+ else:
+ for i in self:
+ if i == item:
+ return True
+ elif i > item:
+ return False
+ return False
+
+ # __len__() introduces a large performance penalty.
+ def count(self):
+ """ Returns the number of recurrences in this set. It will have go
+ trough the whole recurrence, if this hasn't been done before. """
+ if self._len is None:
+ for x in self:
+ pass
+ return self._len
+
+ def before(self, dt, inc=False):
+ """ Returns the last recurrence before the given datetime instance. The
+ inc keyword defines what happens if dt is an occurrence. With
+ inc=True, if dt itself is an occurrence, it will be returned. """
+ if self._cache_complete:
+ gen = self._cache
+ else:
+ gen = self
+ last = None
+ if inc:
+ for i in gen:
+ if i > dt:
+ break
+ last = i
+ else:
+ for i in gen:
+ if i >= dt:
+ break
+ last = i
+ return last
+
+ def after(self, dt, inc=False):
+ """ Returns the first recurrence after the given datetime instance. The
+ inc keyword defines what happens if dt is an occurrence. With
+ inc=True, if dt itself is an occurrence, it will be returned. """
+ if self._cache_complete:
+ gen = self._cache
+ else:
+ gen = self
+ if inc:
+ for i in gen:
+ if i >= dt:
+ return i
+ else:
+ for i in gen:
+ if i > dt:
+ return i
+ return None
+
+ def xafter(self, dt, count=None, inc=False):
+ """
+ Generator which yields up to `count` recurrences after the given
+ datetime instance, equivalent to `after`.
+
+ :param dt:
+ The datetime at which to start generating recurrences.
+
+ :param count:
+ The maximum number of recurrences to generate. If `None` (default),
+ dates are generated until the recurrence rule is exhausted.
+
+ :param inc:
+ If `dt` is an instance of the rule and `inc` is `True`, it is
+ included in the output.
+
+ :yields: Yields a sequence of `datetime` objects.
+ """
+
+ if self._cache_complete:
+ gen = self._cache
+ else:
+ gen = self
+
+ # Select the comparison function
+ if inc:
+ comp = lambda dc, dtc: dc >= dtc
+ else:
+ comp = lambda dc, dtc: dc > dtc
+
+ # Generate dates
+ n = 0
+ for d in gen:
+ if comp(d, dt):
+ if count is not None:
+ n += 1
+ if n > count:
+ break
+
+ yield d
+
+ def between(self, after, before, inc=False, count=1):
+ """ Returns all the occurrences of the rrule between after and before.
+ The inc keyword defines what happens if after and/or before are
+ themselves occurrences. With inc=True, they will be included in the
+ list, if they are found in the recurrence set. """
+ if self._cache_complete:
+ gen = self._cache
+ else:
+ gen = self
+ started = False
+ l = []
+ if inc:
+ for i in gen:
+ if i > before:
+ break
+ elif not started:
+ if i >= after:
+ started = True
+ l.append(i)
+ else:
+ l.append(i)
+ else:
+ for i in gen:
+ if i >= before:
+ break
+ elif not started:
+ if i > after:
+ started = True
+ l.append(i)
+ else:
+ l.append(i)
+ return l
+
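+# Illustrative sketch (not part of the upstream module): before(), after(),
+# between() and the slicing support above all walk the recurrence lazily;
+# with a simple daily rule, for example:
+#
+#     r = rrule(DAILY, dtstart=datetime.datetime(2015, 1, 1), count=10)
+#     r.after(datetime.datetime(2015, 1, 3))            # -> Jan 4, 00:00
+#     r.after(datetime.datetime(2015, 1, 3), inc=True)  # -> Jan 3, 00:00
+#     r.between(datetime.datetime(2015, 1, 2),
+#               datetime.datetime(2015, 1, 5))          # -> [Jan 3, Jan 4]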
+
+class rrule(rrulebase):
+ """
+ That's the base of the rrule operation. It accepts all the keywords
+ defined in the RFC as its constructor parameters (except byday,
+ which was renamed to byweekday) and more. The constructor prototype is::
+
+ rrule(freq)
+
+ Where freq must be one of YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
+ or SECONDLY.
+
+ .. note::
+ Per RFC section 3.3.10, recurrence instances falling on invalid dates
+ and times are ignored rather than coerced:
+
+ Recurrence rules may generate recurrence instances with an invalid
+ date (e.g., February 30) or nonexistent local time (e.g., 1:30 AM
+ on a day where the local time is moved forward by an hour at 1:00
+ AM). Such recurrence instances MUST be ignored and MUST NOT be
+ counted as part of the recurrence set.
+
+ This can lead to possibly surprising behavior when, for example, the
+ start date occurs at the end of the month:
+
+ >>> from dateutil.rrule import rrule, MONTHLY
+ >>> from datetime import datetime
+ >>> start_date = datetime(2014, 12, 31)
+ >>> list(rrule(freq=MONTHLY, count=4, dtstart=start_date))
+ ... # doctest: +NORMALIZE_WHITESPACE
+ [datetime.datetime(2014, 12, 31, 0, 0),
+ datetime.datetime(2015, 1, 31, 0, 0),
+ datetime.datetime(2015, 3, 31, 0, 0),
+ datetime.datetime(2015, 5, 31, 0, 0)]
+
+ Additionally, it supports the following keyword arguments:
+
+ :param cache:
+ If given, it must be a boolean value specifying to enable or disable
+ caching of results. If you will use the same rrule instance multiple
+ times, enabling caching will improve the performance considerably.
+ :param dtstart:
+ The recurrence start. Besides being the base for the recurrence,
+ missing parameters in the final recurrence instances will also be
+ extracted from this date. If not given, datetime.now() will be used
+ instead.
+ :param interval:
+ The interval between each freq iteration. For example, when using
+ YEARLY, an interval of 2 means once every two years, but with HOURLY,
+ it means once every two hours. The default interval is 1.
+ :param wkst:
+ The week start day. Must be one of the MO, TU, WE constants, or an
+ integer, specifying the first day of the week. This will affect
+ recurrences based on weekly periods. The default week start is
+ retrieved from calendar.firstweekday(), and may be modified by
+ calendar.setfirstweekday().
+ :param count:
+ How many occurrences will be generated.
+
+ .. note::
+ As of version 2.5.0, the use of the ``until`` keyword together
+ with the ``count`` keyword is deprecated per RFC-2445 Sec. 4.3.10.
+ :param until:
+ If given, this must be a datetime instance that will specify the
+ limit of the recurrence. The last recurrence in the rule is the greatest
+ datetime that is less than or equal to the value specified in the
+ ``until`` parameter.
+
+ .. note::
+ As of version 2.5.0, the use of the ``until`` keyword together
+ with the ``count`` keyword is deprecated per RFC-2445 Sec. 4.3.10.
+ :param bysetpos:
+ If given, it must be either an integer, or a sequence of integers,
+ positive or negative. Each given integer will specify an occurrence
+ number, corresponding to the nth occurrence of the rule inside the
+ frequency period. For example, a bysetpos of -1, combined with a
+ MONTHLY frequency and a byweekday of (MO, TU, WE, TH, FR), will
+ result in the last work day of every month (see the sketch after
+ this docstring).
+ :param bymonth:
+ If given, it must be either an integer, or a sequence of integers,
+ meaning the months to apply the recurrence to.
+ :param bymonthday:
+ If given, it must be either an integer, or a sequence of integers,
+ meaning the month days to apply the recurrence to.
+ :param byyearday:
+ If given, it must be either an integer, or a sequence of integers,
+ meaning the year days to apply the recurrence to.
+ :param byweekno:
+ If given, it must be either an integer, or a sequence of integers,
+ meaning the week numbers to apply the recurrence to. Week numbers
+ have the meaning described in ISO8601, that is, the first week of
+ the year is that containing at least four days of the new year.
+ :param byweekday:
+ If given, it must be either an integer (0 == MO), a sequence of
+ integers, one of the weekday constants (MO, TU, etc), or a sequence
+ of these constants. When given, these variables will define the
+ weekdays where the recurrence will be applied. It's also possible to
+ use an argument n for the weekday instances, which will mean the nth
+ occurrence of this weekday in the period. For example, with MONTHLY,
+ or with YEARLY and BYMONTH, using FR(+1) in byweekday will specify the
+        first Friday of the month where the recurrence happens. Notice that in
+ the RFC documentation, this is specified as BYDAY, but was renamed to
+ avoid the ambiguity of that keyword.
+ :param byhour:
+ If given, it must be either an integer, or a sequence of integers,
+ meaning the hours to apply the recurrence to.
+ :param byminute:
+ If given, it must be either an integer, or a sequence of integers,
+ meaning the minutes to apply the recurrence to.
+ :param bysecond:
+ If given, it must be either an integer, or a sequence of integers,
+ meaning the seconds to apply the recurrence to.
+ :param byeaster:
+ If given, it must be either an integer, or a sequence of integers,
+ positive or negative. Each integer will define an offset from the
+ Easter Sunday. Passing the offset 0 to byeaster will yield the Easter
+ Sunday itself. This is an extension to the RFC specification.
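+
+    For illustration, the last-work-day rule mentioned under ``bysetpos``
+    can be written as follows (a sketch; the dates shown assume this exact
+    ``dtstart``):
+
+    >>> from dateutil.rrule import rrule, MONTHLY, MO, TU, WE, TH, FR
+    >>> from datetime import datetime
+    >>> list(rrule(freq=MONTHLY, count=3, bysetpos=-1,
+    ...            byweekday=(MO, TU, WE, TH, FR),
+    ...            dtstart=datetime(2015, 1, 1)))
+    ... # doctest: +NORMALIZE_WHITESPACE
+    [datetime.datetime(2015, 1, 30, 0, 0),
+     datetime.datetime(2015, 2, 27, 0, 0),
+     datetime.datetime(2015, 3, 31, 0, 0)]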
+ """
+ def __init__(self, freq, dtstart=None,
+ interval=1, wkst=None, count=None, until=None, bysetpos=None,
+ bymonth=None, bymonthday=None, byyearday=None, byeaster=None,
+ byweekno=None, byweekday=None,
+ byhour=None, byminute=None, bysecond=None,
+ cache=False):
+ super(rrule, self).__init__(cache)
+ global easter
+ if not dtstart:
+ dtstart = datetime.datetime.now().replace(microsecond=0)
+ elif not isinstance(dtstart, datetime.datetime):
+ dtstart = datetime.datetime.fromordinal(dtstart.toordinal())
+ else:
+ dtstart = dtstart.replace(microsecond=0)
+ self._dtstart = dtstart
+ self._tzinfo = dtstart.tzinfo
+ self._freq = freq
+ self._interval = interval
+ self._count = count
+
+ # Cache the original byxxx rules, if they are provided, as the _byxxx
+ # attributes do not necessarily map to the inputs, and this can be
+ # a problem in generating the strings. Only store things if they've
+ # been supplied (the string retrieval will just use .get())
+ self._original_rule = {}
+
+ if until and not isinstance(until, datetime.datetime):
+ until = datetime.datetime.fromordinal(until.toordinal())
+ self._until = until
+
+ if count is not None and until:
+ warn("Using both 'count' and 'until' is inconsistent with RFC 2445"
+ " and has been deprecated in dateutil. Future versions will "
+ "raise an error.", DeprecationWarning)
+
+ if wkst is None:
+ self._wkst = calendar.firstweekday()
+ elif isinstance(wkst, integer_types):
+ self._wkst = wkst
+ else:
+ self._wkst = wkst.weekday
+
+ if bysetpos is None:
+ self._bysetpos = None
+ elif isinstance(bysetpos, integer_types):
+ if bysetpos == 0 or not (-366 <= bysetpos <= 366):
+ raise ValueError("bysetpos must be between 1 and 366, "
+ "or between -366 and -1")
+ self._bysetpos = (bysetpos,)
+ else:
+ self._bysetpos = tuple(bysetpos)
+ for pos in self._bysetpos:
+ if pos == 0 or not (-366 <= pos <= 366):
+ raise ValueError("bysetpos must be between 1 and 366, "
+ "or between -366 and -1")
+
+ if self._bysetpos:
+ self._original_rule['bysetpos'] = self._bysetpos
+
+ if (byweekno is None and byyearday is None and bymonthday is None and
+ byweekday is None and byeaster is None):
+ if freq == YEARLY:
+ if bymonth is None:
+ bymonth = dtstart.month
+ self._original_rule['bymonth'] = None
+ bymonthday = dtstart.day
+ self._original_rule['bymonthday'] = None
+ elif freq == MONTHLY:
+ bymonthday = dtstart.day
+ self._original_rule['bymonthday'] = None
+ elif freq == WEEKLY:
+ byweekday = dtstart.weekday()
+ self._original_rule['byweekday'] = None
+
+ # bymonth
+ if bymonth is None:
+ self._bymonth = None
+ else:
+ if isinstance(bymonth, integer_types):
+ bymonth = (bymonth,)
+
+ self._bymonth = tuple(sorted(set(bymonth)))
+
+ if 'bymonth' not in self._original_rule:
+ self._original_rule['bymonth'] = self._bymonth
+
+ # byyearday
+ if byyearday is None:
+ self._byyearday = None
+ else:
+ if isinstance(byyearday, integer_types):
+ byyearday = (byyearday,)
+
+ self._byyearday = tuple(sorted(set(byyearday)))
+ self._original_rule['byyearday'] = self._byyearday
+
+ # byeaster
+ if byeaster is not None:
+ if not easter:
+ from dateutil import easter
+ if isinstance(byeaster, integer_types):
+ self._byeaster = (byeaster,)
+ else:
+ self._byeaster = tuple(sorted(byeaster))
+
+ self._original_rule['byeaster'] = self._byeaster
+ else:
+ self._byeaster = None
+
+ # bymonthday
+ if bymonthday is None:
+ self._bymonthday = ()
+ self._bynmonthday = ()
+ else:
+ if isinstance(bymonthday, integer_types):
+ bymonthday = (bymonthday,)
+
+ bymonthday = set(bymonthday) # Ensure it's unique
+
+ self._bymonthday = tuple(sorted(x for x in bymonthday if x > 0))
+ self._bynmonthday = tuple(sorted(x for x in bymonthday if x < 0))
+
+ # Storing positive numbers first, then negative numbers
+ if 'bymonthday' not in self._original_rule:
+ self._original_rule['bymonthday'] = tuple(
+ itertools.chain(self._bymonthday, self._bynmonthday))
+
+ # byweekno
+ if byweekno is None:
+ self._byweekno = None
+ else:
+ if isinstance(byweekno, integer_types):
+ byweekno = (byweekno,)
+
+ self._byweekno = tuple(sorted(set(byweekno)))
+
+ self._original_rule['byweekno'] = self._byweekno
+
+ # byweekday / bynweekday
+ if byweekday is None:
+ self._byweekday = None
+ self._bynweekday = None
+ else:
+ # If it's one of the valid non-sequence types, convert to a
+ # single-element sequence before the iterator that builds the
+ # byweekday set.
+ if isinstance(byweekday, integer_types) or hasattr(byweekday, "n"):
+ byweekday = (byweekday,)
+
+ self._byweekday = set()
+ self._bynweekday = set()
+ for wday in byweekday:
+ if isinstance(wday, integer_types):
+ self._byweekday.add(wday)
+ elif not wday.n or freq > MONTHLY:
+ self._byweekday.add(wday.weekday)
+ else:
+ self._bynweekday.add((wday.weekday, wday.n))
+
+ if not self._byweekday:
+ self._byweekday = None
+ elif not self._bynweekday:
+ self._bynweekday = None
+
+ if self._byweekday is not None:
+ self._byweekday = tuple(sorted(self._byweekday))
+ orig_byweekday = [weekday(x) for x in self._byweekday]
+ else:
+ orig_byweekday = tuple()
+
+ if self._bynweekday is not None:
+ self._bynweekday = tuple(sorted(self._bynweekday))
+ orig_bynweekday = [weekday(*x) for x in self._bynweekday]
+ else:
+ orig_bynweekday = tuple()
+
+ if 'byweekday' not in self._original_rule:
+ self._original_rule['byweekday'] = tuple(itertools.chain(
+ orig_byweekday, orig_bynweekday))
+
+ # byhour
+ if byhour is None:
+ if freq < HOURLY:
+ self._byhour = set((dtstart.hour,))
+ else:
+ self._byhour = None
+ else:
+ if isinstance(byhour, integer_types):
+ byhour = (byhour,)
+
+ if freq == HOURLY:
+ self._byhour = self.__construct_byset(start=dtstart.hour,
+ byxxx=byhour,
+ base=24)
+ else:
+ self._byhour = set(byhour)
+
+ self._byhour = tuple(sorted(self._byhour))
+ self._original_rule['byhour'] = self._byhour
+
+ # byminute
+ if byminute is None:
+ if freq < MINUTELY:
+ self._byminute = set((dtstart.minute,))
+ else:
+ self._byminute = None
+ else:
+ if isinstance(byminute, integer_types):
+ byminute = (byminute,)
+
+ if freq == MINUTELY:
+ self._byminute = self.__construct_byset(start=dtstart.minute,
+ byxxx=byminute,
+ base=60)
+ else:
+ self._byminute = set(byminute)
+
+ self._byminute = tuple(sorted(self._byminute))
+ self._original_rule['byminute'] = self._byminute
+
+ # bysecond
+ if bysecond is None:
+ if freq < SECONDLY:
+ self._bysecond = ((dtstart.second,))
+ else:
+ self._bysecond = None
+ else:
+ if isinstance(bysecond, integer_types):
+ bysecond = (bysecond,)
+
+ self._bysecond = set(bysecond)
+
+ if freq == SECONDLY:
+ self._bysecond = self.__construct_byset(start=dtstart.second,
+ byxxx=bysecond,
+ base=60)
+ else:
+ self._bysecond = set(bysecond)
+
+ self._bysecond = tuple(sorted(self._bysecond))
+ self._original_rule['bysecond'] = self._bysecond
+
+ if self._freq >= HOURLY:
+ self._timeset = None
+ else:
+ self._timeset = []
+ for hour in self._byhour:
+ for minute in self._byminute:
+ for second in self._bysecond:
+ self._timeset.append(
+ datetime.time(hour, minute, second,
+ tzinfo=self._tzinfo))
+ self._timeset.sort()
+ self._timeset = tuple(self._timeset)
+
+ def __str__(self):
+ """
+ Output a string that would generate this RRULE if passed to rrulestr.
+ This is mostly compatible with RFC2445, except for the
+ dateutil-specific extension BYEASTER.
+ """
+
+ output = []
+ h, m, s = [None] * 3
+ if self._dtstart:
+ output.append(self._dtstart.strftime('DTSTART:%Y%m%dT%H%M%S'))
+ h, m, s = self._dtstart.timetuple()[3:6]
+
+ parts = ['FREQ=' + FREQNAMES[self._freq]]
+ if self._interval != 1:
+ parts.append('INTERVAL=' + str(self._interval))
+
+ if self._wkst:
+ parts.append('WKST=' + repr(weekday(self._wkst))[0:2])
+
+ if self._count is not None:
+ parts.append('COUNT=' + str(self._count))
+
+ if self._until:
+ parts.append(self._until.strftime('UNTIL=%Y%m%dT%H%M%S'))
+
+ if self._original_rule.get('byweekday') is not None:
+ # The str() method on weekday objects doesn't generate
+ # RFC2445-compliant strings, so we should modify that.
+ original_rule = dict(self._original_rule)
+ wday_strings = []
+ for wday in original_rule['byweekday']:
+ if wday.n:
+ wday_strings.append('{n:+d}{wday}'.format(
+ n=wday.n,
+ wday=repr(wday)[0:2]))
+ else:
+ wday_strings.append(repr(wday))
+
+ original_rule['byweekday'] = wday_strings
+ else:
+ original_rule = self._original_rule
+
+ partfmt = '{name}={vals}'
+ for name, key in [('BYSETPOS', 'bysetpos'),
+ ('BYMONTH', 'bymonth'),
+ ('BYMONTHDAY', 'bymonthday'),
+ ('BYYEARDAY', 'byyearday'),
+ ('BYWEEKNO', 'byweekno'),
+ ('BYDAY', 'byweekday'),
+ ('BYHOUR', 'byhour'),
+ ('BYMINUTE', 'byminute'),
+ ('BYSECOND', 'bysecond'),
+ ('BYEASTER', 'byeaster')]:
+ value = original_rule.get(key)
+ if value:
+ parts.append(partfmt.format(name=name, vals=(','.join(str(v)
+ for v in value))))
+
+ output.append(';'.join(parts))
+ return '\n'.join(output)
+
+ def replace(self, **kwargs):
+        """Return a new rrule with the same attributes, except for those
+        given new values by whichever keyword arguments are specified."""
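+        # Illustrative usage (a sketch; the dates are assumed example values,
+        # not part of the original source):
+        #     daily = rrule(DAILY, count=5, dtstart=datetime.datetime(2015, 1, 1))
+        #     weekly = daily.replace(freq=WEEKLY)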
+ new_kwargs = {"interval": self._interval,
+ "count": self._count,
+ "dtstart": self._dtstart,
+ "freq": self._freq,
+ "until": self._until,
+ "wkst": self._wkst,
+ "cache": False if self._cache is None else True }
+ new_kwargs.update(self._original_rule)
+ new_kwargs.update(kwargs)
+ return rrule(**new_kwargs)
+
+ def _iter(self):
+ year, month, day, hour, minute, second, weekday, yearday, _ = \
+ self._dtstart.timetuple()
+
+ # Some local variables to speed things up a bit
+ freq = self._freq
+ interval = self._interval
+ wkst = self._wkst
+ until = self._until
+ bymonth = self._bymonth
+ byweekno = self._byweekno
+ byyearday = self._byyearday
+ byweekday = self._byweekday
+ byeaster = self._byeaster
+ bymonthday = self._bymonthday
+ bynmonthday = self._bynmonthday
+ bysetpos = self._bysetpos
+ byhour = self._byhour
+ byminute = self._byminute
+ bysecond = self._bysecond
+
+ ii = _iterinfo(self)
+ ii.rebuild(year, month)
+
+ getdayset = {YEARLY: ii.ydayset,
+ MONTHLY: ii.mdayset,
+ WEEKLY: ii.wdayset,
+ DAILY: ii.ddayset,
+ HOURLY: ii.ddayset,
+ MINUTELY: ii.ddayset,
+ SECONDLY: ii.ddayset}[freq]
+
+ if freq < HOURLY:
+ timeset = self._timeset
+ else:
+ gettimeset = {HOURLY: ii.htimeset,
+ MINUTELY: ii.mtimeset,
+ SECONDLY: ii.stimeset}[freq]
+ if ((freq >= HOURLY and
+ self._byhour and hour not in self._byhour) or
+ (freq >= MINUTELY and
+ self._byminute and minute not in self._byminute) or
+ (freq >= SECONDLY and
+ self._bysecond and second not in self._bysecond)):
+ timeset = ()
+ else:
+ timeset = gettimeset(hour, minute, second)
+
+ total = 0
+ count = self._count
+ while True:
+ # Get dayset with the right frequency
+ dayset, start, end = getdayset(year, month, day)
+
+ # Do the "hard" work ;-)
+ filtered = False
+ for i in dayset[start:end]:
+ if ((bymonth and ii.mmask[i] not in bymonth) or
+ (byweekno and not ii.wnomask[i]) or
+ (byweekday and ii.wdaymask[i] not in byweekday) or
+ (ii.nwdaymask and not ii.nwdaymask[i]) or
+ (byeaster and not ii.eastermask[i]) or
+ ((bymonthday or bynmonthday) and
+ ii.mdaymask[i] not in bymonthday and
+ ii.nmdaymask[i] not in bynmonthday) or
+ (byyearday and
+ ((i < ii.yearlen and i+1 not in byyearday and
+ -ii.yearlen+i not in byyearday) or
+ (i >= ii.yearlen and i+1-ii.yearlen not in byyearday and
+ -ii.nextyearlen+i-ii.yearlen not in byyearday)))):
+ dayset[i] = None
+ filtered = True
+
+ # Output results
+ if bysetpos and timeset:
+ poslist = []
+ for pos in bysetpos:
+ if pos < 0:
+ daypos, timepos = divmod(pos, len(timeset))
+ else:
+ daypos, timepos = divmod(pos-1, len(timeset))
+ try:
+ i = [x for x in dayset[start:end]
+ if x is not None][daypos]
+ time = timeset[timepos]
+ except IndexError:
+ pass
+ else:
+ date = datetime.date.fromordinal(ii.yearordinal+i)
+ res = datetime.datetime.combine(date, time)
+ if res not in poslist:
+ poslist.append(res)
+ poslist.sort()
+ for res in poslist:
+ if until and res > until:
+ self._len = total
+ return
+ elif res >= self._dtstart:
+ if count is not None:
+ count -= 1
+ if count < 0:
+ self._len = total
+ return
+ total += 1
+ yield res
+ else:
+ for i in dayset[start:end]:
+ if i is not None:
+ date = datetime.date.fromordinal(ii.yearordinal + i)
+ for time in timeset:
+ res = datetime.datetime.combine(date, time)
+ if until and res > until:
+ self._len = total
+ return
+ elif res >= self._dtstart:
+ if count is not None:
+ count -= 1
+ if count < 0:
+ self._len = total
+ return
+
+ total += 1
+ yield res
+
+ # Handle frequency and interval
+ fixday = False
+ if freq == YEARLY:
+ year += interval
+ if year > datetime.MAXYEAR:
+ self._len = total
+ return
+ ii.rebuild(year, month)
+ elif freq == MONTHLY:
+ month += interval
+ if month > 12:
+ div, mod = divmod(month, 12)
+ month = mod
+ year += div
+ if month == 0:
+ month = 12
+ year -= 1
+ if year > datetime.MAXYEAR:
+ self._len = total
+ return
+ ii.rebuild(year, month)
+ elif freq == WEEKLY:
+ if wkst > weekday:
+ day += -(weekday+1+(6-wkst))+self._interval*7
+ else:
+ day += -(weekday-wkst)+self._interval*7
+ weekday = wkst
+ fixday = True
+ elif freq == DAILY:
+ day += interval
+ fixday = True
+ elif freq == HOURLY:
+ if filtered:
+ # Jump to one iteration before next day
+ hour += ((23-hour)//interval)*interval
+
+ if byhour:
+ ndays, hour = self.__mod_distance(value=hour,
+ byxxx=self._byhour,
+ base=24)
+ else:
+ ndays, hour = divmod(hour+interval, 24)
+
+ if ndays:
+ day += ndays
+ fixday = True
+
+ timeset = gettimeset(hour, minute, second)
+ elif freq == MINUTELY:
+ if filtered:
+ # Jump to one iteration before next day
+ minute += ((1439-(hour*60+minute))//interval)*interval
+
+ valid = False
+ rep_rate = (24*60)
+ for j in range(rep_rate // gcd(interval, rep_rate)):
+ if byminute:
+ nhours, minute = \
+ self.__mod_distance(value=minute,
+ byxxx=self._byminute,
+ base=60)
+ else:
+ nhours, minute = divmod(minute+interval, 60)
+
+ div, hour = divmod(hour+nhours, 24)
+ if div:
+ day += div
+ fixday = True
+ filtered = False
+
+ if not byhour or hour in byhour:
+ valid = True
+ break
+
+ if not valid:
+ raise ValueError('Invalid combination of interval and ' +
+ 'byhour resulting in empty rule.')
+
+ timeset = gettimeset(hour, minute, second)
+ elif freq == SECONDLY:
+ if filtered:
+ # Jump to one iteration before next day
+ second += (((86399 - (hour * 3600 + minute * 60 + second))
+ // interval) * interval)
+
+ rep_rate = (24 * 3600)
+ valid = False
+ for j in range(0, rep_rate // gcd(interval, rep_rate)):
+ if bysecond:
+ nminutes, second = \
+ self.__mod_distance(value=second,
+ byxxx=self._bysecond,
+ base=60)
+ else:
+ nminutes, second = divmod(second+interval, 60)
+
+ div, minute = divmod(minute+nminutes, 60)
+ if div:
+ hour += div
+ div, hour = divmod(hour, 24)
+ if div:
+ day += div
+ fixday = True
+
+ if ((not byhour or hour in byhour) and
+ (not byminute or minute in byminute) and
+ (not bysecond or second in bysecond)):
+ valid = True
+ break
+
+ if not valid:
+ raise ValueError('Invalid combination of interval, ' +
+ 'byhour and byminute resulting in empty' +
+ ' rule.')
+
+ timeset = gettimeset(hour, minute, second)
+
+ if fixday and day > 28:
+ daysinmonth = calendar.monthrange(year, month)[1]
+ if day > daysinmonth:
+ while day > daysinmonth:
+ day -= daysinmonth
+ month += 1
+ if month == 13:
+ month = 1
+ year += 1
+ if year > datetime.MAXYEAR:
+ self._len = total
+ return
+ daysinmonth = calendar.monthrange(year, month)[1]
+ ii.rebuild(year, month)
+
+ def __construct_byset(self, start, byxxx, base):
+ """
+ If a `BYXXX` sequence is passed to the constructor at the same level as
+ `FREQ` (e.g. `FREQ=HOURLY,BYHOUR={2,4,7},INTERVAL=3`), there are some
+ specifications which cannot be reached given some starting conditions.
+
+        This occurs whenever the interval is not coprime with the base of a
+        given unit and the difference between the starting position and the
+        ending position is not divisible by the greatest common divisor of
+        the interval and the base. For example, with a FREQ of hourly
+        starting at 17:00 and an interval of 4, the only valid values for
+        BYHOUR would be {21, 1, 5, 9, 13, 17}, because 4 and 24 are not
+        coprime.
+
+ :param start:
+ Specifies the starting position.
+ :param byxxx:
+ An iterable containing the list of allowed values.
+ :param base:
+ The largest allowable value for the specified frequency (e.g.
+ 24 hours, 60 minutes).
+
+        This does not preserve the type of the iterable; it returns a set,
+        since the values should be unique and the order is irrelevant, and
+        this will speed up later lookups.
+
+        In the event of an empty set, a :exc:`ValueError` is raised, as this
+        would result in an empty rrule.
+ """
+
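+        # Worked example of the filtering described above (illustrative):
+        # with start=17, interval=4 and base=24, gcd(4, 24) == 4, so a
+        # requested hour of 21 is kept (21 - 17 == 4, divisible by 4) while a
+        # requested hour of 18 is dropped (18 - 17 == 1, not divisible by 4).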
+ cset = set()
+
+ # Support a single byxxx value.
+ if isinstance(byxxx, integer_types):
+ byxxx = (byxxx, )
+
+ for num in byxxx:
+ i_gcd = gcd(self._interval, base)
+ # Use divmod rather than % because we need to wrap negative nums.
+ if i_gcd == 1 or divmod(num - start, i_gcd)[1] == 0:
+ cset.add(num)
+
+ if len(cset) == 0:
+ raise ValueError("Invalid rrule byxxx generates an empty set.")
+
+ return cset
+
+ def __mod_distance(self, value, byxxx, base):
+ """
+ Calculates the next value in a sequence where the `FREQ` parameter is
+ specified along with a `BYXXX` parameter at the same "level"
+ (e.g. `HOURLY` specified with `BYHOUR`).
+
+ :param value:
+ The old value of the component.
+ :param byxxx:
+ The `BYXXX` set, which should have been generated by
+ `rrule._construct_byset`, or something else which checks that a
+ valid rule is present.
+ :param base:
+ The largest allowable value for the specified frequency (e.g.
+ 24 hours, 60 minutes).
+
+ If a valid value is not found after `base` iterations (the maximum
+        number before the sequence would start to repeat), this raises a
+        :exc:`ValueError`, as no valid values were found.
+
+ This returns a tuple of `divmod(n*interval, base)`, where `n` is the
+ smallest number of `interval` repetitions until the next specified
+ value in `byxxx` is found.
+ """
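+        # Worked example (illustrative): with interval=4, base=24 and
+        # byxxx={1, 5, 9, 13, 17, 21}, advancing from value=21 gives
+        # divmod(21 + 4, 24) == (1, 1); 1 is in byxxx, so (1, 1) is returned:
+        # one day is carried over and the new hour is 1.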
+ accumulator = 0
+ for ii in range(1, base + 1):
+ # Using divmod() over % to account for negative intervals
+ div, value = divmod(value + self._interval, base)
+ accumulator += div
+ if value in byxxx:
+ return (accumulator, value)
+
+
+class _iterinfo(object):
+ __slots__ = ["rrule", "lastyear", "lastmonth",
+ "yearlen", "nextyearlen", "yearordinal", "yearweekday",
+ "mmask", "mrange", "mdaymask", "nmdaymask",
+ "wdaymask", "wnomask", "nwdaymask", "eastermask"]
+
+ def __init__(self, rrule):
+ for attr in self.__slots__:
+ setattr(self, attr, None)
+ self.rrule = rrule
+
+ def rebuild(self, year, month):
+ # Every mask is 7 days longer to handle cross-year weekly periods.
+ rr = self.rrule
+ if year != self.lastyear:
+ self.yearlen = 365 + calendar.isleap(year)
+ self.nextyearlen = 365 + calendar.isleap(year + 1)
+ firstyday = datetime.date(year, 1, 1)
+ self.yearordinal = firstyday.toordinal()
+ self.yearweekday = firstyday.weekday()
+
+ wday = datetime.date(year, 1, 1).weekday()
+ if self.yearlen == 365:
+ self.mmask = M365MASK
+ self.mdaymask = MDAY365MASK
+ self.nmdaymask = NMDAY365MASK
+ self.wdaymask = WDAYMASK[wday:]
+ self.mrange = M365RANGE
+ else:
+ self.mmask = M366MASK
+ self.mdaymask = MDAY366MASK
+ self.nmdaymask = NMDAY366MASK
+ self.wdaymask = WDAYMASK[wday:]
+ self.mrange = M366RANGE
+
+ if not rr._byweekno:
+ self.wnomask = None
+ else:
+ self.wnomask = [0]*(self.yearlen+7)
+ # no1wkst = firstwkst = self.wdaymask.index(rr._wkst)
+ no1wkst = firstwkst = (7-self.yearweekday+rr._wkst) % 7
+ if no1wkst >= 4:
+ no1wkst = 0
+ # Number of days in the year, plus the days we got
+ # from last year.
+ wyearlen = self.yearlen+(self.yearweekday-rr._wkst) % 7
+ else:
+ # Number of days in the year, minus the days we
+ # left in last year.
+ wyearlen = self.yearlen-no1wkst
+ div, mod = divmod(wyearlen, 7)
+ numweeks = div+mod//4
+ for n in rr._byweekno:
+ if n < 0:
+ n += numweeks+1
+ if not (0 < n <= numweeks):
+ continue
+ if n > 1:
+ i = no1wkst+(n-1)*7
+ if no1wkst != firstwkst:
+ i -= 7-firstwkst
+ else:
+ i = no1wkst
+ for j in range(7):
+ self.wnomask[i] = 1
+ i += 1
+ if self.wdaymask[i] == rr._wkst:
+ break
+ if 1 in rr._byweekno:
+ # Check week number 1 of next year as well
+ # TODO: Check -numweeks for next year.
+ i = no1wkst+numweeks*7
+ if no1wkst != firstwkst:
+ i -= 7-firstwkst
+ if i < self.yearlen:
+ # If week starts in next year, we
+ # don't care about it.
+ for j in range(7):
+ self.wnomask[i] = 1
+ i += 1
+ if self.wdaymask[i] == rr._wkst:
+ break
+ if no1wkst:
+ # Check last week number of last year as
+ # well. If no1wkst is 0, either the year
+ # started on week start, or week number 1
+ # got days from last year, so there are no
+ # days from last year's last week number in
+ # this year.
+ if -1 not in rr._byweekno:
+ lyearweekday = datetime.date(year-1, 1, 1).weekday()
+ lno1wkst = (7-lyearweekday+rr._wkst) % 7
+ lyearlen = 365+calendar.isleap(year-1)
+ if lno1wkst >= 4:
+ lno1wkst = 0
+ lnumweeks = 52+(lyearlen +
+ (lyearweekday-rr._wkst) % 7) % 7//4
+ else:
+ lnumweeks = 52+(self.yearlen-no1wkst) % 7//4
+ else:
+ lnumweeks = -1
+ if lnumweeks in rr._byweekno:
+ for i in range(no1wkst):
+ self.wnomask[i] = 1
+
+ if (rr._bynweekday and (month != self.lastmonth or
+ year != self.lastyear)):
+ ranges = []
+ if rr._freq == YEARLY:
+ if rr._bymonth:
+ for month in rr._bymonth:
+ ranges.append(self.mrange[month-1:month+1])
+ else:
+ ranges = [(0, self.yearlen)]
+ elif rr._freq == MONTHLY:
+ ranges = [self.mrange[month-1:month+1]]
+ if ranges:
+ # Weekly frequency won't get here, so we may not
+ # care about cross-year weekly periods.
+ self.nwdaymask = [0]*self.yearlen
+ for first, last in ranges:
+ last -= 1
+ for wday, n in rr._bynweekday:
+ if n < 0:
+ i = last+(n+1)*7
+ i -= (self.wdaymask[i]-wday) % 7
+ else:
+ i = first+(n-1)*7
+ i += (7-self.wdaymask[i]+wday) % 7
+ if first <= i <= last:
+ self.nwdaymask[i] = 1
+
+ if rr._byeaster:
+ self.eastermask = [0]*(self.yearlen+7)
+ eyday = easter.easter(year).toordinal()-self.yearordinal
+ for offset in rr._byeaster:
+ self.eastermask[eyday+offset] = 1
+
+ self.lastyear = year
+ self.lastmonth = month
+
+ def ydayset(self, year, month, day):
+ return list(range(self.yearlen)), 0, self.yearlen
+
+ def mdayset(self, year, month, day):
+ dset = [None]*self.yearlen
+ start, end = self.mrange[month-1:month+1]
+ for i in range(start, end):
+ dset[i] = i
+ return dset, start, end
+
+ def wdayset(self, year, month, day):
+ # We need to handle cross-year weeks here.
+ dset = [None]*(self.yearlen+7)
+ i = datetime.date(year, month, day).toordinal()-self.yearordinal
+ start = i
+ for j in range(7):
+ dset[i] = i
+ i += 1
+ # if (not (0 <= i < self.yearlen) or
+ # self.wdaymask[i] == self.rrule._wkst):
+ # This will cross the year boundary, if necessary.
+ if self.wdaymask[i] == self.rrule._wkst:
+ break
+ return dset, start, i
+
+ def ddayset(self, year, month, day):
+ dset = [None] * self.yearlen
+ i = datetime.date(year, month, day).toordinal() - self.yearordinal
+ dset[i] = i
+ return dset, i, i + 1
+
+ def htimeset(self, hour, minute, second):
+ tset = []
+ rr = self.rrule
+ for minute in rr._byminute:
+ for second in rr._bysecond:
+ tset.append(datetime.time(hour, minute, second,
+ tzinfo=rr._tzinfo))
+ tset.sort()
+ return tset
+
+ def mtimeset(self, hour, minute, second):
+ tset = []
+ rr = self.rrule
+ for second in rr._bysecond:
+ tset.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo))
+ tset.sort()
+ return tset
+
+ def stimeset(self, hour, minute, second):
+ return (datetime.time(hour, minute, second,
+ tzinfo=self.rrule._tzinfo),)
+
+
+class rruleset(rrulebase):
+ """ The rruleset type allows more complex recurrence setups, mixing
+ multiple rules, dates, exclusion rules, and exclusion dates. The type
+ constructor takes the following keyword arguments:
+
+ :param cache: If True, caching of results will be enabled, improving
+ performance of multiple queries considerably. """
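+
+    # Illustrative usage (a sketch; the dates are assumed example values):
+    #     rset = rruleset()
+    #     rset.rrule(rrule(DAILY, count=5, dtstart=datetime.datetime(2015, 1, 1)))
+    #     rset.exdate(datetime.datetime(2015, 1, 3))
+    #     list(rset)   # the five daily dates minus January 3rd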
+
+ class _genitem(object):
+ def __init__(self, genlist, gen):
+ try:
+ self.dt = advance_iterator(gen)
+ genlist.append(self)
+ except StopIteration:
+ pass
+ self.genlist = genlist
+ self.gen = gen
+
+ def __next__(self):
+ try:
+ self.dt = advance_iterator(self.gen)
+ except StopIteration:
+ if self.genlist[0] is self:
+ heapq.heappop(self.genlist)
+ else:
+ self.genlist.remove(self)
+ heapq.heapify(self.genlist)
+
+ next = __next__
+
+ def __lt__(self, other):
+ return self.dt < other.dt
+
+ def __gt__(self, other):
+ return self.dt > other.dt
+
+ def __eq__(self, other):
+ return self.dt == other.dt
+
+ def __ne__(self, other):
+ return self.dt != other.dt
+
+ def __init__(self, cache=False):
+ super(rruleset, self).__init__(cache)
+ self._rrule = []
+ self._rdate = []
+ self._exrule = []
+ self._exdate = []
+
+ @_invalidates_cache
+ def rrule(self, rrule):
+ """ Include the given :py:class:`rrule` instance in the recurrence set
+ generation. """
+ self._rrule.append(rrule)
+
+ @_invalidates_cache
+ def rdate(self, rdate):
+ """ Include the given :py:class:`datetime` instance in the recurrence
+ set generation. """
+ self._rdate.append(rdate)
+
+ @_invalidates_cache
+ def exrule(self, exrule):
+ """ Include the given rrule instance in the recurrence set exclusion
+ list. Dates which are part of the given recurrence rules will not
+ be generated, even if some inclusive rrule or rdate matches them.
+ """
+ self._exrule.append(exrule)
+
+ @_invalidates_cache
+ def exdate(self, exdate):
+ """ Include the given datetime instance in the recurrence set
+ exclusion list. Dates included that way will not be generated,
+ even if some inclusive rrule or rdate matches them. """
+ self._exdate.append(exdate)
+
+ def _iter(self):
+ rlist = []
+ self._rdate.sort()
+ self._genitem(rlist, iter(self._rdate))
+ for gen in [iter(x) for x in self._rrule]:
+ self._genitem(rlist, gen)
+ exlist = []
+ self._exdate.sort()
+ self._genitem(exlist, iter(self._exdate))
+ for gen in [iter(x) for x in self._exrule]:
+ self._genitem(exlist, gen)
+ lastdt = None
+ total = 0
+ heapq.heapify(rlist)
+ heapq.heapify(exlist)
+ while rlist:
+ ritem = rlist[0]
+ if not lastdt or lastdt != ritem.dt:
+ while exlist and exlist[0] < ritem:
+ exitem = exlist[0]
+ advance_iterator(exitem)
+ if exlist and exlist[0] is exitem:
+ heapq.heapreplace(exlist, exitem)
+ if not exlist or ritem != exlist[0]:
+ total += 1
+ yield ritem.dt
+ lastdt = ritem.dt
+ advance_iterator(ritem)
+ if rlist and rlist[0] is ritem:
+ heapq.heapreplace(rlist, ritem)
+ self._len = total
+
+
+class _rrulestr(object):
+
+ _freq_map = {"YEARLY": YEARLY,
+ "MONTHLY": MONTHLY,
+ "WEEKLY": WEEKLY,
+ "DAILY": DAILY,
+ "HOURLY": HOURLY,
+ "MINUTELY": MINUTELY,
+ "SECONDLY": SECONDLY}
+
+ _weekday_map = {"MO": 0, "TU": 1, "WE": 2, "TH": 3,
+ "FR": 4, "SA": 5, "SU": 6}
+
+ def _handle_int(self, rrkwargs, name, value, **kwargs):
+ rrkwargs[name.lower()] = int(value)
+
+ def _handle_int_list(self, rrkwargs, name, value, **kwargs):
+ rrkwargs[name.lower()] = [int(x) for x in value.split(',')]
+
+ _handle_INTERVAL = _handle_int
+ _handle_COUNT = _handle_int
+ _handle_BYSETPOS = _handle_int_list
+ _handle_BYMONTH = _handle_int_list
+ _handle_BYMONTHDAY = _handle_int_list
+ _handle_BYYEARDAY = _handle_int_list
+ _handle_BYEASTER = _handle_int_list
+ _handle_BYWEEKNO = _handle_int_list
+ _handle_BYHOUR = _handle_int_list
+ _handle_BYMINUTE = _handle_int_list
+ _handle_BYSECOND = _handle_int_list
+
+ def _handle_FREQ(self, rrkwargs, name, value, **kwargs):
+ rrkwargs["freq"] = self._freq_map[value]
+
+ def _handle_UNTIL(self, rrkwargs, name, value, **kwargs):
+ global parser
+ if not parser:
+ from dateutil import parser
+ try:
+ rrkwargs["until"] = parser.parse(value,
+ ignoretz=kwargs.get("ignoretz"),
+ tzinfos=kwargs.get("tzinfos"))
+ except ValueError:
+ raise ValueError("invalid until date")
+
+ def _handle_WKST(self, rrkwargs, name, value, **kwargs):
+ rrkwargs["wkst"] = self._weekday_map[value]
+
+ def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs):
+ """
+ Two ways to specify this: +1MO or MO(+1)
+ """
+ l = []
+ for wday in value.split(','):
+ if '(' in wday:
+ # If it's of the form TH(+1), etc.
+ splt = wday.split('(')
+ w = splt[0]
+ n = int(splt[1][:-1])
+ elif len(wday):
+ # If it's of the form +1MO
+ for i in range(len(wday)):
+ if wday[i] not in '+-0123456789':
+ break
+ n = wday[:i] or None
+ w = wday[i:]
+ if n:
+ n = int(n)
+ else:
+ raise ValueError("Invalid (empty) BYDAY specification.")
+
+ l.append(weekdays[self._weekday_map[w]](n))
+ rrkwargs["byweekday"] = l
+
+ _handle_BYDAY = _handle_BYWEEKDAY
+
+ def _parse_rfc_rrule(self, line,
+ dtstart=None,
+ cache=False,
+ ignoretz=False,
+ tzinfos=None):
+ if line.find(':') != -1:
+ name, value = line.split(':')
+ if name != "RRULE":
+ raise ValueError("unknown parameter name")
+ else:
+ value = line
+ rrkwargs = {}
+ for pair in value.split(';'):
+ name, value = pair.split('=')
+ name = name.upper()
+ value = value.upper()
+ try:
+ getattr(self, "_handle_"+name)(rrkwargs, name, value,
+ ignoretz=ignoretz,
+ tzinfos=tzinfos)
+ except AttributeError:
+ raise ValueError("unknown parameter '%s'" % name)
+ except (KeyError, ValueError):
+ raise ValueError("invalid '%s': %s" % (name, value))
+ return rrule(dtstart=dtstart, cache=cache, **rrkwargs)
+
+ def _parse_rfc(self, s,
+ dtstart=None,
+ cache=False,
+ unfold=False,
+ forceset=False,
+ compatible=False,
+ ignoretz=False,
+ tzinfos=None):
+ global parser
+ if compatible:
+ forceset = True
+ unfold = True
+ s = s.upper()
+ if not s.strip():
+ raise ValueError("empty string")
+ if unfold:
+ lines = s.splitlines()
+ i = 0
+ while i < len(lines):
+ line = lines[i].rstrip()
+ if not line:
+ del lines[i]
+ elif i > 0 and line[0] == " ":
+ lines[i-1] += line[1:]
+ del lines[i]
+ else:
+ i += 1
+ else:
+ lines = s.split()
+ if (not forceset and len(lines) == 1 and (s.find(':') == -1 or
+ s.startswith('RRULE:'))):
+ return self._parse_rfc_rrule(lines[0], cache=cache,
+ dtstart=dtstart, ignoretz=ignoretz,
+ tzinfos=tzinfos)
+ else:
+ rrulevals = []
+ rdatevals = []
+ exrulevals = []
+ exdatevals = []
+ for line in lines:
+ if not line:
+ continue
+ if line.find(':') == -1:
+ name = "RRULE"
+ value = line
+ else:
+ name, value = line.split(':', 1)
+ parms = name.split(';')
+ if not parms:
+ raise ValueError("empty property name")
+ name = parms[0]
+ parms = parms[1:]
+ if name == "RRULE":
+ for parm in parms:
+ raise ValueError("unsupported RRULE parm: "+parm)
+ rrulevals.append(value)
+ elif name == "RDATE":
+ for parm in parms:
+ if parm != "VALUE=DATE-TIME":
+ raise ValueError("unsupported RDATE parm: "+parm)
+ rdatevals.append(value)
+ elif name == "EXRULE":
+ for parm in parms:
+ raise ValueError("unsupported EXRULE parm: "+parm)
+ exrulevals.append(value)
+ elif name == "EXDATE":
+ for parm in parms:
+ if parm != "VALUE=DATE-TIME":
+ raise ValueError("unsupported EXDATE parm: "+parm)
+ exdatevals.append(value)
+ elif name == "DTSTART":
+ for parm in parms:
+ raise ValueError("unsupported DTSTART parm: "+parm)
+ if not parser:
+ from dateutil import parser
+ dtstart = parser.parse(value, ignoretz=ignoretz,
+ tzinfos=tzinfos)
+ else:
+ raise ValueError("unsupported property: "+name)
+ if (forceset or len(rrulevals) > 1 or rdatevals
+ or exrulevals or exdatevals):
+ if not parser and (rdatevals or exdatevals):
+ from dateutil import parser
+ rset = rruleset(cache=cache)
+ for value in rrulevals:
+ rset.rrule(self._parse_rfc_rrule(value, dtstart=dtstart,
+ ignoretz=ignoretz,
+ tzinfos=tzinfos))
+ for value in rdatevals:
+ for datestr in value.split(','):
+ rset.rdate(parser.parse(datestr,
+ ignoretz=ignoretz,
+ tzinfos=tzinfos))
+ for value in exrulevals:
+ rset.exrule(self._parse_rfc_rrule(value, dtstart=dtstart,
+ ignoretz=ignoretz,
+ tzinfos=tzinfos))
+ for value in exdatevals:
+ for datestr in value.split(','):
+ rset.exdate(parser.parse(datestr,
+ ignoretz=ignoretz,
+ tzinfos=tzinfos))
+ if compatible and dtstart:
+ rset.rdate(dtstart)
+ return rset
+ else:
+ return self._parse_rfc_rrule(rrulevals[0],
+ dtstart=dtstart,
+ cache=cache,
+ ignoretz=ignoretz,
+ tzinfos=tzinfos)
+
+ def __call__(self, s, **kwargs):
+ return self._parse_rfc(s, **kwargs)
+
+
+rrulestr = _rrulestr()
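+
+# Illustrative usage of the module-level parser (a sketch; the RRULE string
+# and dates are assumed example values):
+#     rule = rrulestr("DTSTART:20150101T000000\nRRULE:FREQ=DAILY;COUNT=3")
+#     list(rule)   # three daily occurrences starting 2015-01-01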
+
+# vim:ts=4:sw=4:et
diff --git a/appWSM/lib/python2.7/site-packages/dateutil/tz/__init__.py b/appWSM/lib/python2.7/site-packages/dateutil/tz/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b0a5043c7679b0ba114740370a0836319824b3f7
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/dateutil/tz/__init__.py
@@ -0,0 +1,5 @@
+from .tz import *
+
+__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange",
+ "tzstr", "tzical", "tzwin", "tzwinlocal", "gettz",
+ "enfold", "datetime_ambiguous", "datetime_exists"]
diff --git a/appWSM/lib/python2.7/site-packages/dateutil/tz/_common.py b/appWSM/lib/python2.7/site-packages/dateutil/tz/_common.py
new file mode 100644
index 0000000000000000000000000000000000000000..f1cf2afbd59331513a1884c27686f655170a4de4
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/dateutil/tz/_common.py
@@ -0,0 +1,394 @@
+from six import PY3
+
+from functools import wraps
+
+from datetime import datetime, timedelta, tzinfo
+
+
+ZERO = timedelta(0)
+
+__all__ = ['tzname_in_python2', 'enfold']
+
+
+def tzname_in_python2(namefunc):
+ """Change unicode output into bytestrings in Python 2
+
+    The tzname() API changed in Python 3: it used to return bytes, but was
+    changed to return unicode strings.
+ """
+ def adjust_encoding(*args, **kwargs):
+ name = namefunc(*args, **kwargs)
+ if name is not None and not PY3:
+ name = name.encode()
+
+ return name
+
+ return adjust_encoding
+
+
+# The following is adapted from Alexander Belopolsky's tz library
+# https://github.com/abalkin/tz
+if hasattr(datetime, 'fold'):
+    # This is the Python 3.6+ situation, where datetime.fold exists natively
+ def enfold(dt, fold=1):
+ """
+ Provides a unified interface for assigning the ``fold`` attribute to
+ datetimes both before and after the implementation of PEP-495.
+
+ :param fold:
+ The value for the ``fold`` attribute in the returned datetime. This
+ should be either 0 or 1.
+
+ :return:
+ Returns an object for which ``getattr(dt, 'fold', 0)`` returns
+ ``fold`` for all versions of Python. In versions prior to
+ Python 3.6, this is a ``_DatetimeWithFold`` object, which is a
+ subclass of :py:class:`datetime.datetime` with the ``fold``
+ attribute added, if ``fold`` is 1.
+
+ .. versionadded:: 2.6.0
+ """
+ return dt.replace(fold=fold)
+
+else:
+ class _DatetimeWithFold(datetime):
+ """
+ This is a class designed to provide a PEP 495-compliant interface for
+ Python versions before 3.6. It is used only for dates in a fold, so
+ the ``fold`` attribute is fixed at ``1``.
+
+ .. versionadded:: 2.6.0
+ """
+ __slots__ = ()
+
+ @property
+ def fold(self):
+ return 1
+
+ def enfold(dt, fold=1):
+ """
+ Provides a unified interface for assigning the ``fold`` attribute to
+ datetimes both before and after the implementation of PEP-495.
+
+ :param fold:
+ The value for the ``fold`` attribute in the returned datetime. This
+ should be either 0 or 1.
+
+ :return:
+ Returns an object for which ``getattr(dt, 'fold', 0)`` returns
+ ``fold`` for all versions of Python. In versions prior to
+ Python 3.6, this is a ``_DatetimeWithFold`` object, which is a
+ subclass of :py:class:`datetime.datetime` with the ``fold``
+ attribute added, if ``fold`` is 1.
+
+ .. versionadded:: 2.6.0
+ """
+ if getattr(dt, 'fold', 0) == fold:
+ return dt
+
+ args = dt.timetuple()[:6]
+ args += (dt.microsecond, dt.tzinfo)
+
+ if fold:
+ return _DatetimeWithFold(*args)
+ else:
+ return datetime(*args)
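+
+# Illustrative use of ``enfold`` (a sketch; the zone and the ambiguous wall
+# time are assumed example values):
+#     from dateutil import tz
+#     dt = datetime(2011, 11, 6, 1, 30, tzinfo=tz.gettz('America/New_York'))
+#     enfold(dt, fold=1)   # marks the second (post-transition) 1:30 AM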
+
+
+def _validate_fromutc_inputs(f):
+ """
+ The CPython version of ``fromutc`` checks that the input is a ``datetime``
+ object and that ``self`` is attached as its ``tzinfo``.
+ """
+ @wraps(f)
+ def fromutc(self, dt):
+ if not isinstance(dt, datetime):
+ raise TypeError("fromutc() requires a datetime argument")
+ if dt.tzinfo is not self:
+ raise ValueError("dt.tzinfo is not self")
+
+ return f(self, dt)
+
+ return fromutc
+
+
+class _tzinfo(tzinfo):
+ """
+ Base class for all ``dateutil`` ``tzinfo`` objects.
+ """
+
+ def is_ambiguous(self, dt):
+ """
+ Whether or not the "wall time" of a given datetime is ambiguous in this
+ zone.
+
+ :param dt:
+ A :py:class:`datetime.datetime`, naive or time zone aware.
+
+
+ :return:
+ Returns ``True`` if ambiguous, ``False`` otherwise.
+
+ .. versionadded:: 2.6.0
+ """
+
+ dt = dt.replace(tzinfo=self)
+
+ wall_0 = enfold(dt, fold=0)
+ wall_1 = enfold(dt, fold=1)
+
+ same_offset = wall_0.utcoffset() == wall_1.utcoffset()
+ same_dt = wall_0.replace(tzinfo=None) == wall_1.replace(tzinfo=None)
+
+ return same_dt and not same_offset
+
+ def _fold_status(self, dt_utc, dt_wall):
+ """
+ Determine the fold status of a "wall" datetime, given a representation
+ of the same datetime as a (naive) UTC datetime. This is calculated based
+ on the assumption that ``dt.utcoffset() - dt.dst()`` is constant for all
+ datetimes, and that this offset is the actual number of hours separating
+ ``dt_utc`` and ``dt_wall``.
+
+ :param dt_utc:
+ Representation of the datetime as UTC
+
+ :param dt_wall:
+ Representation of the datetime as "wall time". This parameter must
+ either have a `fold` attribute or have a fold-naive
+ :class:`datetime.tzinfo` attached, otherwise the calculation may
+ fail.
+ """
+ if self.is_ambiguous(dt_wall):
+ delta_wall = dt_wall - dt_utc
+ _fold = int(delta_wall == (dt_utc.utcoffset() - dt_utc.dst()))
+ else:
+ _fold = 0
+
+ return _fold
+
+ def _fold(self, dt):
+ return getattr(dt, 'fold', 0)
+
+ def _fromutc(self, dt):
+ """
+ Given a timezone-aware datetime in a given timezone, calculates a
+ timezone-aware datetime in a new timezone.
+
+ Since this is the one time that we *know* we have an unambiguous
+ datetime object, we take this opportunity to determine whether the
+ datetime is ambiguous and in a "fold" state (e.g. if it's the first
+        occurrence, chronologically, of the ambiguous datetime).
+
+ :param dt:
+ A timezone-aware :class:`datetime.datetime` object.
+ """
+
+ # Re-implement the algorithm from Python's datetime.py
+ dtoff = dt.utcoffset()
+ if dtoff is None:
+ raise ValueError("fromutc() requires a non-None utcoffset() "
+ "result")
+
+ # The original datetime.py code assumes that `dst()` defaults to
+ # zero during ambiguous times. PEP 495 inverts this presumption, so
+ # for pre-PEP 495 versions of python, we need to tweak the algorithm.
+ dtdst = dt.dst()
+ if dtdst is None:
+ raise ValueError("fromutc() requires a non-None dst() result")
+ delta = dtoff - dtdst
+
+ dt += delta
+ # Set fold=1 so we can default to being in the fold for
+ # ambiguous dates.
+ dtdst = enfold(dt, fold=1).dst()
+ if dtdst is None:
+ raise ValueError("fromutc(): dt.dst gave inconsistent "
+ "results; cannot convert")
+ return dt + dtdst
+
+ @_validate_fromutc_inputs
+ def fromutc(self, dt):
+ """
+ Given a timezone-aware datetime in a given timezone, calculates a
+ timezone-aware datetime in a new timezone.
+
+ Since this is the one time that we *know* we have an unambiguous
+ datetime object, we take this opportunity to determine whether the
+ datetime is ambiguous and in a "fold" state (e.g. if it's the first
+        occurrence, chronologically, of the ambiguous datetime).
+
+ :param dt:
+ A timezone-aware :class:`datetime.datetime` object.
+ """
+ dt_wall = self._fromutc(dt)
+
+ # Calculate the fold status given the two datetimes.
+ _fold = self._fold_status(dt, dt_wall)
+
+ # Set the default fold value for ambiguous dates
+ return enfold(dt_wall, fold=_fold)
+
+
+class tzrangebase(_tzinfo):
+ """
+ This is an abstract base class for time zones represented by an annual
+ transition into and out of DST. Child classes should implement the following
+ methods:
+
+ * ``__init__(self, *args, **kwargs)``
+ * ``transitions(self, year)`` - this is expected to return a tuple of
+ datetimes representing the DST on and off transitions in standard
+ time.
+
+ A fully initialized ``tzrangebase`` subclass should also provide the
+ following attributes:
+ * ``hasdst``: Boolean whether or not the zone uses DST.
+ * ``_dst_offset`` / ``_std_offset``: :class:`datetime.timedelta` objects
+ representing the respective UTC offsets.
+ * ``_dst_abbr`` / ``_std_abbr``: Strings representing the timezone short
+ abbreviations in DST and STD, respectively.
+ * ``_hasdst``: Whether or not the zone has DST.
+
+ .. versionadded:: 2.6.0
+ """
+ def __init__(self):
+ raise NotImplementedError('tzrangebase is an abstract base class')
+
+ def utcoffset(self, dt):
+ isdst = self._isdst(dt)
+
+ if isdst is None:
+ return None
+ elif isdst:
+ return self._dst_offset
+ else:
+ return self._std_offset
+
+ def dst(self, dt):
+ isdst = self._isdst(dt)
+
+ if isdst is None:
+ return None
+ elif isdst:
+ return self._dst_base_offset
+ else:
+ return ZERO
+
+ @tzname_in_python2
+ def tzname(self, dt):
+ if self._isdst(dt):
+ return self._dst_abbr
+ else:
+ return self._std_abbr
+
+ def fromutc(self, dt):
+ """ Given a datetime in UTC, return local time """
+ if not isinstance(dt, datetime):
+ raise TypeError("fromutc() requires a datetime argument")
+
+ if dt.tzinfo is not self:
+ raise ValueError("dt.tzinfo is not self")
+
+ # Get transitions - if there are none, fixed offset
+ transitions = self.transitions(dt.year)
+ if transitions is None:
+ return dt + self.utcoffset(dt)
+
+ # Get the transition times in UTC
+ dston, dstoff = transitions
+
+ dston -= self._std_offset
+ dstoff -= self._std_offset
+
+ utc_transitions = (dston, dstoff)
+ dt_utc = dt.replace(tzinfo=None)
+
+ isdst = self._naive_isdst(dt_utc, utc_transitions)
+
+ if isdst:
+ dt_wall = dt + self._dst_offset
+ else:
+ dt_wall = dt + self._std_offset
+
+ _fold = int(not isdst and self.is_ambiguous(dt_wall))
+
+ return enfold(dt_wall, fold=_fold)
+
+ def is_ambiguous(self, dt):
+ """
+ Whether or not the "wall time" of a given datetime is ambiguous in this
+ zone.
+
+ :param dt:
+ A :py:class:`datetime.datetime`, naive or time zone aware.
+
+
+ :return:
+ Returns ``True`` if ambiguous, ``False`` otherwise.
+
+ .. versionadded:: 2.6.0
+ """
+ if not self.hasdst:
+ return False
+
+ start, end = self.transitions(dt.year)
+
+ dt = dt.replace(tzinfo=None)
+ return (end <= dt < end + self._dst_base_offset)
+
+ def _isdst(self, dt):
+ if not self.hasdst:
+ return False
+ elif dt is None:
+ return None
+
+ transitions = self.transitions(dt.year)
+
+ if transitions is None:
+ return False
+
+ dt = dt.replace(tzinfo=None)
+
+ isdst = self._naive_isdst(dt, transitions)
+
+ # Handle ambiguous dates
+ if not isdst and self.is_ambiguous(dt):
+ return not self._fold(dt)
+ else:
+ return isdst
+
+ def _naive_isdst(self, dt, transitions):
+ dston, dstoff = transitions
+
+ dt = dt.replace(tzinfo=None)
+
+ if dston < dstoff:
+ isdst = dston <= dt < dstoff
+ else:
+ isdst = not dstoff <= dt < dston
+
+ return isdst
+
+ @property
+ def _dst_base_offset(self):
+ return self._dst_offset - self._std_offset
+
+ __hash__ = None
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __repr__(self):
+ return "%s(...)" % self.__class__.__name__
+
+ __reduce__ = object.__reduce__
+
+
+def _total_seconds(td):
+ # Python 2.6 doesn't have a total_seconds() method on timedelta objects
+ return ((td.seconds + td.days * 86400) * 1000000 +
+ td.microseconds) // 1000000
+
+
+_total_seconds = getattr(timedelta, 'total_seconds', _total_seconds)
diff --git a/appWSM/lib/python2.7/site-packages/dateutil/tz/tz.py b/appWSM/lib/python2.7/site-packages/dateutil/tz/tz.py
new file mode 100644
index 0000000000000000000000000000000000000000..94682829909f82630478abc998d0629bf2c20078
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/dateutil/tz/tz.py
@@ -0,0 +1,1511 @@
+# -*- coding: utf-8 -*-
+"""
+This module offers timezone implementations subclassing the abstract
+:py:class:`datetime.tzinfo` type. There are classes to handle tzfile format
+files (usually found in :file:`/etc/localtime`, :file:`/usr/share/zoneinfo`,
+etc.), the TZ environment string (in all known formats), given ranges (with
+help from relative deltas), the local machine timezone, fixed offset
+timezones, and the UTC timezone.
+"""
+import datetime
+import struct
+import time
+import sys
+import os
+import bisect
+
+from six import string_types
+from ._common import tzname_in_python2, _tzinfo, _total_seconds
+from ._common import tzrangebase, enfold
+from ._common import _validate_fromutc_inputs
+
+try:
+ from .win import tzwin, tzwinlocal
+except ImportError:
+ tzwin = tzwinlocal = None
+
+ZERO = datetime.timedelta(0)
+EPOCH = datetime.datetime.utcfromtimestamp(0)
+EPOCHORDINAL = EPOCH.toordinal()
+
+
+class tzutc(datetime.tzinfo):
+ """
+ This is a tzinfo object that represents the UTC time zone.
+ """
+ def utcoffset(self, dt):
+ return ZERO
+
+ def dst(self, dt):
+ return ZERO
+
+ @tzname_in_python2
+ def tzname(self, dt):
+ return "UTC"
+
+ def is_ambiguous(self, dt):
+ """
+ Whether or not the "wall time" of a given datetime is ambiguous in this
+ zone.
+
+ :param dt:
+ A :py:class:`datetime.datetime`, naive or time zone aware.
+
+
+ :return:
+ Returns ``True`` if ambiguous, ``False`` otherwise.
+
+ .. versionadded:: 2.6.0
+ """
+ return False
+
+ @_validate_fromutc_inputs
+ def fromutc(self, dt):
+ """
+ Fast track version of fromutc() returns the original ``dt`` object for
+ any valid :py:class:`datetime.datetime` object.
+ """
+ return dt
+
+ def __eq__(self, other):
+ if not isinstance(other, (tzutc, tzoffset)):
+ return NotImplemented
+
+ return (isinstance(other, tzutc) or
+ (isinstance(other, tzoffset) and other._offset == ZERO))
+
+ __hash__ = None
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __repr__(self):
+ return "%s()" % self.__class__.__name__
+
+ __reduce__ = object.__reduce__
+
+
+class tzoffset(datetime.tzinfo):
+ """
+ A simple class for representing a fixed offset from UTC.
+
+ :param name:
+ The timezone name, to be returned when ``tzname()`` is called.
+
+ :param offset:
+        The time zone offset in seconds, or (since version 2.6.0) as a
+        :py:class:`datetime.timedelta` object.
+ """
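+    # Illustrative construction (a sketch; the name and offset are assumed
+    # example values):
+    #     brst = tzoffset("BRST", -10800)                       # UTC-3, from seconds
+    #     brst = tzoffset("BRST", datetime.timedelta(hours=-3)) # or a timedelta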
+ def __init__(self, name, offset):
+ self._name = name
+
+ try:
+ # Allow a timedelta
+ offset = _total_seconds(offset)
+ except (TypeError, AttributeError):
+ pass
+ self._offset = datetime.timedelta(seconds=offset)
+
+ def utcoffset(self, dt):
+ return self._offset
+
+ def dst(self, dt):
+ return ZERO
+
+ @tzname_in_python2
+ def tzname(self, dt):
+ return self._name
+
+ @_validate_fromutc_inputs
+ def fromutc(self, dt):
+ return dt + self._offset
+
+ def is_ambiguous(self, dt):
+ """
+ Whether or not the "wall time" of a given datetime is ambiguous in this
+ zone.
+
+ :param dt:
+ A :py:class:`datetime.datetime`, naive or time zone aware.
+
+
+ :return:
+ Returns ``True`` if ambiguous, ``False`` otherwise.
+
+ .. versionadded:: 2.6.0
+ """
+ return False
+
+ def __eq__(self, other):
+ if not isinstance(other, tzoffset):
+ return NotImplemented
+
+ return self._offset == other._offset
+
+ __hash__ = None
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __repr__(self):
+ return "%s(%s, %s)" % (self.__class__.__name__,
+ repr(self._name),
+ int(_total_seconds(self._offset)))
+
+ __reduce__ = object.__reduce__
+
+
+class tzlocal(_tzinfo):
+ """
+ A :class:`tzinfo` subclass built around the ``time`` timezone functions.
+ """
+ def __init__(self):
+ super(tzlocal, self).__init__()
+
+ self._std_offset = datetime.timedelta(seconds=-time.timezone)
+ if time.daylight:
+ self._dst_offset = datetime.timedelta(seconds=-time.altzone)
+ else:
+ self._dst_offset = self._std_offset
+
+ self._dst_saved = self._dst_offset - self._std_offset
+ self._hasdst = bool(self._dst_saved)
+
+ def utcoffset(self, dt):
+ if dt is None and self._hasdst:
+ return None
+
+ if self._isdst(dt):
+ return self._dst_offset
+ else:
+ return self._std_offset
+
+ def dst(self, dt):
+ if dt is None and self._hasdst:
+ return None
+
+ if self._isdst(dt):
+ return self._dst_offset - self._std_offset
+ else:
+ return ZERO
+
+ @tzname_in_python2
+ def tzname(self, dt):
+ return time.tzname[self._isdst(dt)]
+
+ def is_ambiguous(self, dt):
+ """
+ Whether or not the "wall time" of a given datetime is ambiguous in this
+ zone.
+
+ :param dt:
+ A :py:class:`datetime.datetime`, naive or time zone aware.
+
+
+ :return:
+ Returns ``True`` if ambiguous, ``False`` otherwise.
+
+ .. versionadded:: 2.6.0
+ """
+ naive_dst = self._naive_is_dst(dt)
+ return (not naive_dst and
+ (naive_dst != self._naive_is_dst(dt - self._dst_saved)))
+
+ def _naive_is_dst(self, dt):
+ timestamp = _datetime_to_timestamp(dt)
+ return time.localtime(timestamp + time.timezone).tm_isdst
+
+ def _isdst(self, dt, fold_naive=True):
+ # We can't use mktime here. It is unstable when deciding if
+ # the hour near to a change is DST or not.
+ #
+ # timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour,
+ # dt.minute, dt.second, dt.weekday(), 0, -1))
+ # return time.localtime(timestamp).tm_isdst
+ #
+ # The code above yields the following result:
+ #
+ # >>> import tz, datetime
+ # >>> t = tz.tzlocal()
+ # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
+ # 'BRDT'
+ # >>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname()
+ # 'BRST'
+ # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
+ # 'BRST'
+ # >>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname()
+ # 'BRDT'
+ # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
+ # 'BRDT'
+ #
+ # Here is a more stable implementation:
+ #
+ if not self._hasdst:
+ return False
+
+ # Check for ambiguous times:
+ dstval = self._naive_is_dst(dt)
+ fold = getattr(dt, 'fold', None)
+
+ if self.is_ambiguous(dt):
+ if fold is not None:
+ return not self._fold(dt)
+ else:
+ return True
+
+ return dstval
+
+ def __eq__(self, other):
+ if not isinstance(other, tzlocal):
+ return NotImplemented
+
+ return (self._std_offset == other._std_offset and
+ self._dst_offset == other._dst_offset)
+
+ __hash__ = None
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __repr__(self):
+ return "%s()" % self.__class__.__name__
+
+ __reduce__ = object.__reduce__
+
+
+class _ttinfo(object):
+ __slots__ = ["offset", "delta", "isdst", "abbr",
+ "isstd", "isgmt", "dstoffset"]
+
+ def __init__(self):
+ for attr in self.__slots__:
+ setattr(self, attr, None)
+
+ def __repr__(self):
+ l = []
+ for attr in self.__slots__:
+ value = getattr(self, attr)
+ if value is not None:
+ l.append("%s=%s" % (attr, repr(value)))
+ return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
+
+ def __eq__(self, other):
+ if not isinstance(other, _ttinfo):
+ return NotImplemented
+
+ return (self.offset == other.offset and
+ self.delta == other.delta and
+ self.isdst == other.isdst and
+ self.abbr == other.abbr and
+ self.isstd == other.isstd and
+ self.isgmt == other.isgmt and
+ self.dstoffset == other.dstoffset)
+
+ __hash__ = None
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __getstate__(self):
+ state = {}
+ for name in self.__slots__:
+ state[name] = getattr(self, name, None)
+ return state
+
+ def __setstate__(self, state):
+ for name in self.__slots__:
+ if name in state:
+ setattr(self, name, state[name])
+
+
+class _tzfile(object):
+ """
+ Lightweight class for holding the relevant transition and time zone
+ information read from binary tzfiles.
+ """
+ attrs = ['trans_list', 'trans_list_utc', 'trans_idx', 'ttinfo_list',
+ 'ttinfo_std', 'ttinfo_dst', 'ttinfo_before', 'ttinfo_first']
+
+ def __init__(self, **kwargs):
+ for attr in self.attrs:
+ setattr(self, attr, kwargs.get(attr, None))
+
+
+class tzfile(_tzinfo):
+ """
+    This is a ``tzinfo`` subclass that allows one to use the ``tzfile(5)``
+ format timezone files to extract current and historical zone information.
+
+ :param fileobj:
+ This can be an opened file stream or a file name that the time zone
+ information can be read from.
+
+ :param filename:
+ This is an optional parameter specifying the source of the time zone
+ information in the event that ``fileobj`` is a file object. If omitted
+ and ``fileobj`` is a file stream, this parameter will be set either to
+ ``fileobj``'s ``name`` attribute or to ``repr(fileobj)``.
+
+ See `Sources for Time Zone and Daylight Saving Time Data
+ `_ for more information. Time zone
+ files can be compiled from the `IANA Time Zone database files
+ `_ with the `zic time zone compiler
+ `_
+ """
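+    # Illustrative construction (a sketch; the path is an assumed example and
+    # may not exist on every system):
+    #     nyc = tzfile('/usr/share/zoneinfo/America/New_York')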
+
+ def __init__(self, fileobj, filename=None):
+ super(tzfile, self).__init__()
+
+ file_opened_here = False
+ if isinstance(fileobj, string_types):
+ self._filename = fileobj
+ fileobj = open(fileobj, 'rb')
+ file_opened_here = True
+ elif filename is not None:
+ self._filename = filename
+ elif hasattr(fileobj, "name"):
+ self._filename = fileobj.name
+ else:
+ self._filename = repr(fileobj)
+
+ if fileobj is not None:
+ if not file_opened_here:
+ fileobj = _ContextWrapper(fileobj)
+
+ with fileobj as file_stream:
+ tzobj = self._read_tzfile(file_stream)
+
+ self._set_tzdata(tzobj)
+
+ def _set_tzdata(self, tzobj):
+ """ Set the time zone data of this object from a _tzfile object """
+ # Copy the relevant attributes over as private attributes
+ for attr in _tzfile.attrs:
+ setattr(self, '_' + attr, getattr(tzobj, attr))
+
+ def _read_tzfile(self, fileobj):
+ out = _tzfile()
+
+ # From tzfile(5):
+ #
+ # The time zone information files used by tzset(3)
+ # begin with the magic characters "TZif" to identify
+ # them as time zone information files, followed by
+ # sixteen bytes reserved for future use, followed by
+ # six four-byte values of type long, written in a
+ # ``standard'' byte order (the high-order byte
+ # of the value is written first).
+ if fileobj.read(4).decode() != "TZif":
+ raise ValueError("magic not found")
+
+ fileobj.read(16)
+
+ (
+ # The number of UTC/local indicators stored in the file.
+ ttisgmtcnt,
+
+ # The number of standard/wall indicators stored in the file.
+ ttisstdcnt,
+
+ # The number of leap seconds for which data is
+ # stored in the file.
+ leapcnt,
+
+ # The number of "transition times" for which data
+ # is stored in the file.
+ timecnt,
+
+ # The number of "local time types" for which data
+ # is stored in the file (must not be zero).
+ typecnt,
+
+ # The number of characters of "time zone
+ # abbreviation strings" stored in the file.
+ charcnt,
+
+ ) = struct.unpack(">6l", fileobj.read(24))
+
+ # The above header is followed by tzh_timecnt four-byte
+ # values of type long, sorted in ascending order.
+ # These values are written in ``standard'' byte order.
+ # Each is used as a transition time (as returned by
+ # time(2)) at which the rules for computing local time
+ # change.
+
+ if timecnt:
+ out.trans_list_utc = list(struct.unpack(">%dl" % timecnt,
+ fileobj.read(timecnt*4)))
+ else:
+ out.trans_list_utc = []
+
+ # Next come tzh_timecnt one-byte values of type unsigned
+ # char; each one tells which of the different types of
+ # ``local time'' types described in the file is associated
+ # with the same-indexed transition time. These values
+ # serve as indices into an array of ttinfo structures that
+ # appears next in the file.
+
+ if timecnt:
+ out.trans_idx = struct.unpack(">%dB" % timecnt,
+ fileobj.read(timecnt))
+ else:
+ out.trans_idx = []
+
+ # Each ttinfo structure is written as a four-byte value
+ # for tt_gmtoff of type long, in a standard byte
+ # order, followed by a one-byte value for tt_isdst
+ # and a one-byte value for tt_abbrind. In each
+ # structure, tt_gmtoff gives the number of
+ # seconds to be added to UTC, tt_isdst tells whether
+ # tm_isdst should be set by localtime(3), and
+ # tt_abbrind serves as an index into the array of
+ # time zone abbreviation characters that follow the
+ # ttinfo structure(s) in the file.
+
+ ttinfo = []
+
+ for i in range(typecnt):
+ ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))
+
+ abbr = fileobj.read(charcnt).decode()
+
+ # Then there are tzh_leapcnt pairs of four-byte
+ # values, written in standard byte order; the
+ # first value of each pair gives the time (as
+ # returned by time(2)) at which a leap second
+ # occurs; the second gives the total number of
+ # leap seconds to be applied after the given time.
+ # The pairs of values are sorted in ascending order
+ # by time.
+
+ # Not used, for now (but seek for correct file position)
+ if leapcnt:
+ fileobj.seek(leapcnt * 8, os.SEEK_CUR)
+
+ # Then there are tzh_ttisstdcnt standard/wall
+ # indicators, each stored as a one-byte value;
+ # they tell whether the transition times associated
+ # with local time types were specified as standard
+ # time or wall clock time, and are used when
+ # a time zone file is used in handling POSIX-style
+ # time zone environment variables.
+
+ if ttisstdcnt:
+ isstd = struct.unpack(">%db" % ttisstdcnt,
+ fileobj.read(ttisstdcnt))
+
+ # Finally, there are tzh_ttisgmtcnt UTC/local
+ # indicators, each stored as a one-byte value;
+ # they tell whether the transition times associated
+ # with local time types were specified as UTC or
+ # local time, and are used when a time zone file
+ # is used in handling POSIX-style time zone envi-
+ # ronment variables.
+
+ if ttisgmtcnt:
+ isgmt = struct.unpack(">%db" % ttisgmtcnt,
+ fileobj.read(ttisgmtcnt))
+
+ # Build ttinfo list
+ out.ttinfo_list = []
+ for i in range(typecnt):
+ gmtoff, isdst, abbrind = ttinfo[i]
+ # Round to full-minutes if that's not the case. Python's
+ # datetime doesn't accept sub-minute timezones. Check
+ # http://python.org/sf/1447945 for some information.
+ gmtoff = 60 * ((gmtoff + 30) // 60)
+ tti = _ttinfo()
+ tti.offset = gmtoff
+ tti.dstoffset = datetime.timedelta(0)
+ tti.delta = datetime.timedelta(seconds=gmtoff)
+ tti.isdst = isdst
+ tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
+ tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
+ tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
+ out.ttinfo_list.append(tti)
+
+ # Replace ttinfo indexes for ttinfo objects.
+ out.trans_idx = [out.ttinfo_list[idx] for idx in out.trans_idx]
+
+ # Set standard, dst, and before ttinfos. before will be
+ # used when a given time is before any transitions,
+ # and will be set to the first non-dst ttinfo, or to
+ # the first dst, if all of them are dst.
+ out.ttinfo_std = None
+ out.ttinfo_dst = None
+ out.ttinfo_before = None
+ if out.ttinfo_list:
+ if not out.trans_list_utc:
+ out.ttinfo_std = out.ttinfo_first = out.ttinfo_list[0]
+ else:
+ for i in range(timecnt-1, -1, -1):
+ tti = out.trans_idx[i]
+ if not out.ttinfo_std and not tti.isdst:
+ out.ttinfo_std = tti
+ elif not out.ttinfo_dst and tti.isdst:
+ out.ttinfo_dst = tti
+
+ if out.ttinfo_std and out.ttinfo_dst:
+ break
+ else:
+ if out.ttinfo_dst and not out.ttinfo_std:
+ out.ttinfo_std = out.ttinfo_dst
+
+ for tti in out.ttinfo_list:
+ if not tti.isdst:
+ out.ttinfo_before = tti
+ break
+ else:
+ out.ttinfo_before = out.ttinfo_list[0]
+
+ # Now fix transition times to become relative to wall time.
+ #
+ # I'm not sure about this. In my tests, the tz source file
+ # is setup to wall time, and in the binary file isstd and
+ # isgmt are off, so it should be in wall time. OTOH, it's
+ # always in gmt time. Let me know if you have comments
+ # about this.
+ laststdoffset = None
+ out.trans_list = []
+ for i, tti in enumerate(out.trans_idx):
+ if not tti.isdst:
+ offset = tti.offset
+ laststdoffset = offset
+ else:
+ if laststdoffset is not None:
+ # Store the DST offset as well and update it in the list
+ tti.dstoffset = tti.offset - laststdoffset
+ out.trans_idx[i] = tti
+
+ offset = laststdoffset or 0
+
+ out.trans_list.append(out.trans_list_utc[i] + offset)
+
+ # In case we missed any DST offsets on the way in for some reason, make
+ # a second pass over the list, looking for the /next/ DST offset.
+ laststdoffset = None
+ for i in reversed(range(len(out.trans_idx))):
+ tti = out.trans_idx[i]
+ if tti.isdst:
+ if not (tti.dstoffset or laststdoffset is None):
+ tti.dstoffset = tti.offset - laststdoffset
+ else:
+ laststdoffset = tti.offset
+
+ if not isinstance(tti.dstoffset, datetime.timedelta):
+ tti.dstoffset = datetime.timedelta(seconds=tti.dstoffset)
+
+ out.trans_idx[i] = tti
+
+ out.trans_idx = tuple(out.trans_idx)
+ out.trans_list = tuple(out.trans_list)
+ out.trans_list_utc = tuple(out.trans_list_utc)
+
+ return out
+
+ def _find_last_transition(self, dt, in_utc=False):
+ # If there's no list, there are no transitions to find
+ if not self._trans_list:
+ return None
+
+ timestamp = _datetime_to_timestamp(dt)
+
+ # Find where the timestamp fits in the transition list - if the
+ # timestamp is a transition time, it's part of the "after" period.
+ trans_list = self._trans_list_utc if in_utc else self._trans_list
+ idx = bisect.bisect_right(trans_list, timestamp)
+
+ # We want to know when the previous transition was, so subtract off 1
+ return idx - 1
+
+ def _get_ttinfo(self, idx):
+ # For no list or after the last transition, default to _ttinfo_std
+ if idx is None or (idx + 1) >= len(self._trans_list):
+ return self._ttinfo_std
+
+ # If there is a list and the time is before it, return _ttinfo_before
+ if idx < 0:
+ return self._ttinfo_before
+
+ return self._trans_idx[idx]
+
+ def _find_ttinfo(self, dt):
+ idx = self._resolve_ambiguous_time(dt)
+
+ return self._get_ttinfo(idx)
+
+ def fromutc(self, dt):
+ """
+ The ``tzfile`` implementation of :py:func:`datetime.tzinfo.fromutc`.
+
+ :param dt:
+ A :py:class:`datetime.datetime` object.
+
+ :raises TypeError:
+ Raised if ``dt`` is not a :py:class:`datetime.datetime` object.
+
+ :raises ValueError:
+ Raised if this is called with a ``dt`` which does not have this
+ ``tzinfo`` attached.
+
+ :return:
+ Returns a :py:class:`datetime.datetime` object representing the
+ wall time in ``self``'s time zone.
+ """
+ # These isinstance checks are in datetime.tzinfo, so we'll preserve
+ # them, even if we don't care about duck typing.
+ if not isinstance(dt, datetime.datetime):
+ raise TypeError("fromutc() requires a datetime argument")
+
+ if dt.tzinfo is not self:
+ raise ValueError("dt.tzinfo is not self")
+
+ # First treat UTC as wall time and get the transition we're in.
+ idx = self._find_last_transition(dt, in_utc=True)
+ tti = self._get_ttinfo(idx)
+
+ dt_out = dt + datetime.timedelta(seconds=tti.offset)
+
+ fold = self.is_ambiguous(dt_out, idx=idx)
+
+ return enfold(dt_out, fold=int(fold))
+
+ def is_ambiguous(self, dt, idx=None):
+ """
+ Whether or not the "wall time" of a given datetime is ambiguous in this
+ zone.
+
+ :param dt:
+ A :py:class:`datetime.datetime`, naive or time zone aware.
+
+
+ :return:
+ Returns ``True`` if ambiguous, ``False`` otherwise.
+
+ .. versionadded:: 2.6.0
+ """
+ if idx is None:
+ idx = self._find_last_transition(dt)
+
+ # Calculate the difference in offsets from current to previous
+ timestamp = _datetime_to_timestamp(dt)
+ tti = self._get_ttinfo(idx)
+
+ if idx is None or idx <= 0:
+ return False
+
+ od = self._get_ttinfo(idx - 1).offset - tti.offset
+ tt = self._trans_list[idx] # Transition time
+
+ return timestamp < tt + od
+
+ def _resolve_ambiguous_time(self, dt):
+ idx = self._find_last_transition(dt)
+
+ # If we have no transitions, return the index
+ _fold = self._fold(dt)
+ if idx is None or idx == 0:
+ return idx
+
+ # If it's ambiguous and we're in a fold, shift to a different index.
+ idx_offset = int(not _fold and self.is_ambiguous(dt, idx))
+
+ return idx - idx_offset
+
+ def utcoffset(self, dt):
+ if dt is None:
+ return None
+
+ if not self._ttinfo_std:
+ return ZERO
+
+ return self._find_ttinfo(dt).delta
+
+ def dst(self, dt):
+ if dt is None:
+ return None
+
+ if not self._ttinfo_dst:
+ return ZERO
+
+ tti = self._find_ttinfo(dt)
+
+ if not tti.isdst:
+ return ZERO
+
+ # The documentation says that utcoffset()-dst() must
+ # be constant for every dt.
+ return tti.dstoffset
+
+ @tzname_in_python2
+ def tzname(self, dt):
+ if not self._ttinfo_std or dt is None:
+ return None
+ return self._find_ttinfo(dt).abbr
+
+ def __eq__(self, other):
+ if not isinstance(other, tzfile):
+ return NotImplemented
+ return (self._trans_list == other._trans_list and
+ self._trans_idx == other._trans_idx and
+ self._ttinfo_list == other._ttinfo_list)
+
+ __hash__ = None
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __repr__(self):
+ return "%s(%s)" % (self.__class__.__name__, repr(self._filename))
+
+ def __reduce__(self):
+ return self.__reduce_ex__(None)
+
+ def __reduce_ex__(self, protocol):
+ return (self.__class__, (None, self._filename), self.__dict__)
+
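+# Illustrative usage sketch (not part of the library): reading a compiled
+# tzfile(5) database and attaching it to a datetime. The path below is an
+# assumption; any binary file produced by ``zic`` would work.
+#
+#     from dateutil.tz import tzfile
+#     import datetime
+#     eastern = tzfile('/usr/share/zoneinfo/America/New_York')
+#     dt = datetime.datetime(2017, 7, 1, 12, 0, tzinfo=eastern)
+#     dt.utcoffset(), dt.tzname()   # expected: UTC-04:00, 'EDT' (DST in July)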
+
+class tzrange(tzrangebase):
+ """
+ The ``tzrange`` object is a time zone specified by a set of offsets and
+ abbreviations, equivalent to the way the ``TZ`` variable can be specified
+ in POSIX-like systems, but using Python delta objects to specify DST
+ start, end and offsets.
+
+ :param stdabbr:
+ The abbreviation for standard time (e.g. ``'EST'``).
+
+ :param stdoffset:
+ An integer or :class:`datetime.timedelta` object or equivalent
+ specifying the base offset from UTC.
+
+ If unspecified, +00:00 is used.
+
+ :param dstabbr:
+ The abbreviation for DST / "Summer" time (e.g. ``'EDT'``).
+
+ If specified, with no other DST information, DST is assumed to occur
+ and the default behavior of ``dstoffset``, ``start`` and ``end`` is
+ used. If unspecified and no other DST information is specified, it
+ is assumed that this zone has no DST.
+
+ If this is unspecified and other DST information *is* specified,
+ DST occurs in the zone but the time zone abbreviation is left
+ unchanged.
+
+ :param dstoffset:
+ An integer or :class:`datetime.timedelta` object or equivalent
+ specifying the UTC offset during DST. If unspecified and any other DST
+ information is specified, it is assumed to be the STD offset +1 hour.
+
+ :param start:
+ A :class:`relativedelta.relativedelta` object or equivalent specifying
+ the time and time of year that daylight savings time starts. To specify,
+ for example, that DST starts at 2AM on the 2nd Sunday in March, pass:
+
+ ``relativedelta(hours=2, month=3, day=1, weekday=SU(+2))``
+
+ If unspecified and any other DST information is specified, the default
+ value is 2 AM on the first Sunday in April.
+
+ :param end:
+ A :class:`relativedelta.relativedelta` object or equivalent representing
+ the time and time of year that daylight savings time ends, with the
+ same specification method as in ``start``. One note is that this should
+ point to the first time in the *standard* zone, so if a transition
+ occurs at 2AM in the DST zone and the clocks are set back 1 hour to 1AM,
+ set the `hours` parameter to +1.
+
+
+ **Examples:**
+
+ .. testsetup:: tzrange
+
+ from dateutil.tz import tzrange, tzstr
+
+ .. doctest:: tzrange
+
+ >>> tzstr('EST5EDT') == tzrange("EST", -18000, "EDT")
+ True
+
+ >>> from dateutil.relativedelta import *
+ >>> range1 = tzrange("EST", -18000, "EDT")
+ >>> range2 = tzrange("EST", -18000, "EDT", -14400,
+ ... relativedelta(hours=+2, month=4, day=1,
+ ... weekday=SU(+1)),
+ ... relativedelta(hours=+1, month=10, day=31,
+ ... weekday=SU(-1)))
+ >>> tzstr('EST5EDT') == range1 == range2
+ True
+
+ """
+ def __init__(self, stdabbr, stdoffset=None,
+ dstabbr=None, dstoffset=None,
+ start=None, end=None):
+
+ global relativedelta
+ from dateutil import relativedelta
+
+ self._std_abbr = stdabbr
+ self._dst_abbr = dstabbr
+
+ try:
+ stdoffset = _total_seconds(stdoffset)
+ except (TypeError, AttributeError):
+ pass
+
+ try:
+ dstoffset = _total_seconds(dstoffset)
+ except (TypeError, AttributeError):
+ pass
+
+ if stdoffset is not None:
+ self._std_offset = datetime.timedelta(seconds=stdoffset)
+ else:
+ self._std_offset = ZERO
+
+ if dstoffset is not None:
+ self._dst_offset = datetime.timedelta(seconds=dstoffset)
+ elif dstabbr and stdoffset is not None:
+ self._dst_offset = self._std_offset + datetime.timedelta(hours=+1)
+ else:
+ self._dst_offset = ZERO
+
+ if dstabbr and start is None:
+ self._start_delta = relativedelta.relativedelta(
+ hours=+2, month=4, day=1, weekday=relativedelta.SU(+1))
+ else:
+ self._start_delta = start
+
+ if dstabbr and end is None:
+ self._end_delta = relativedelta.relativedelta(
+ hours=+1, month=10, day=31, weekday=relativedelta.SU(-1))
+ else:
+ self._end_delta = end
+
+ self._dst_base_offset_ = self._dst_offset - self._std_offset
+ self.hasdst = bool(self._start_delta)
+
+ def transitions(self, year):
+ """
+ For a given year, get the DST on and off transition times, expressed
+ always on the standard time side. For zones with no transitions, this
+ function returns ``None``.
+
+ :param year:
+ The year whose transitions you would like to query.
+
+ :return:
+ Returns a :class:`tuple` of :class:`datetime.datetime` objects,
+ ``(dston, dstoff)`` for zones with an annual DST transition, or
+ ``None`` for fixed offset zones.
+ """
+ if not self.hasdst:
+ return None
+
+ base_year = datetime.datetime(year, 1, 1)
+
+ start = base_year + self._start_delta
+ end = base_year + self._end_delta
+
+ return (start, end)
+
+ def __eq__(self, other):
+ if not isinstance(other, tzrange):
+ return NotImplemented
+
+ return (self._std_abbr == other._std_abbr and
+ self._dst_abbr == other._dst_abbr and
+ self._std_offset == other._std_offset and
+ self._dst_offset == other._dst_offset and
+ self._start_delta == other._start_delta and
+ self._end_delta == other._end_delta)
+
+ @property
+ def _dst_base_offset(self):
+ return self._dst_base_offset_
+
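+# Illustrative usage sketch (not part of the library): querying the DST
+# transitions computed by tzrange above for a given year. The offsets are
+# the usual US/Eastern values; the abbreviations are only examples.
+#
+#     from dateutil.tz import tzrange
+#     eastern = tzrange("EST", -18000, "EDT")
+#     dston, dstoff = eastern.transitions(2003)
+#     # With the default rules (first Sunday in April / last Sunday in
+#     # October), this yields 2003-04-06 02:00 and 2003-10-26 01:00,
+#     # both expressed on the standard-time side.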
+
+class tzstr(tzrange):
+ """
+ ``tzstr`` objects are time zone objects specified by a time-zone string as
+ it would be passed to a ``TZ`` variable on POSIX-style systems (see
+ the `GNU C Library: TZ Variable`_ for more details).
+
+ There is one notable exception, which is that POSIX-style time zones use an
+ inverted offset format, so normally ``GMT+3`` would be parsed as an offset
+ 3 hours *behind* GMT. The ``tzstr`` time zone object will parse this as an
+ offset 3 hours *ahead* of GMT. If you would like to maintain the POSIX
+ behavior, pass a ``True`` value to ``posix_offset``.
+
+ The :class:`tzrange` object provides the same functionality, but is
+ specified using :class:`relativedelta.relativedelta` objects, rather than
+ strings.
+
+ :param s:
+ A time zone string in ``TZ`` variable format. This can be a
+ :class:`bytes` (2.x: :class:`str`), :class:`str` (2.x: :class:`unicode`)
+ or a stream emitting unicode characters (e.g. :class:`StringIO`).
+
+ :param posix_offset:
+ Optional. If set to ``True``, interpret strings such as ``GMT+3`` or
+ ``UTC+3`` as being 3 hours *behind* UTC rather than ahead, per the
+ POSIX standard.
+
+ .. _`GNU C Library: TZ Variable`:
+ https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html
+ """
+ def __init__(self, s, posix_offset=False):
+ global parser
+ from dateutil import parser
+
+ self._s = s
+
+ res = parser._parsetz(s)
+ if res is None:
+ raise ValueError("unknown string format")
+
+ # Here we break the compatibility with the TZ variable handling.
+ # GMT-3 actually *means* the timezone -3.
+ if res.stdabbr in ("GMT", "UTC") and not posix_offset:
+ res.stdoffset *= -1
+
+ # We must initialize it first, since _delta() needs
+ # _std_offset and _dst_offset set. Use False in start/end
+ # to avoid building it two times.
+ tzrange.__init__(self, res.stdabbr, res.stdoffset,
+ res.dstabbr, res.dstoffset,
+ start=False, end=False)
+
+ if not res.dstabbr:
+ self._start_delta = None
+ self._end_delta = None
+ else:
+ self._start_delta = self._delta(res.start)
+ if self._start_delta:
+ self._end_delta = self._delta(res.end, isend=1)
+
+ self.hasdst = bool(self._start_delta)
+
+ def _delta(self, x, isend=0):
+ from dateutil import relativedelta
+ kwargs = {}
+ if x.month is not None:
+ kwargs["month"] = x.month
+ if x.weekday is not None:
+ kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week)
+ if x.week > 0:
+ kwargs["day"] = 1
+ else:
+ kwargs["day"] = 31
+ elif x.day:
+ kwargs["day"] = x.day
+ elif x.yday is not None:
+ kwargs["yearday"] = x.yday
+ elif x.jyday is not None:
+ kwargs["nlyearday"] = x.jyday
+ if not kwargs:
+ # Default is to start on first sunday of april, and end
+ # on last sunday of october.
+ if not isend:
+ kwargs["month"] = 4
+ kwargs["day"] = 1
+ kwargs["weekday"] = relativedelta.SU(+1)
+ else:
+ kwargs["month"] = 10
+ kwargs["day"] = 31
+ kwargs["weekday"] = relativedelta.SU(-1)
+ if x.time is not None:
+ kwargs["seconds"] = x.time
+ else:
+ # Default is 2AM.
+ kwargs["seconds"] = 7200
+ if isend:
+ # Convert to standard time, to follow the documented way
+ # of working with the extra hour. See the documentation
+ # of the tzinfo class.
+ delta = self._dst_offset - self._std_offset
+ kwargs["seconds"] -= delta.seconds + delta.days * 86400
+ return relativedelta.relativedelta(**kwargs)
+
+ def __repr__(self):
+ return "%s(%s)" % (self.__class__.__name__, repr(self._s))
+
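+# Illustrative usage sketch (not part of the library): the inverted-offset
+# behaviour described in the tzstr docstring above.
+#
+#     from dateutil.tz import tzstr
+#     import datetime
+#     dt = datetime.datetime(2017, 1, 1)
+#     tzstr('GMT+3').utcoffset(dt)                     # three hours ahead of UTC
+#     tzstr('GMT+3', posix_offset=True).utcoffset(dt)  # three hours behind (POSIX reading)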
+
+class _tzicalvtzcomp(object):
+ def __init__(self, tzoffsetfrom, tzoffsetto, isdst,
+ tzname=None, rrule=None):
+ self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom)
+ self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto)
+ self.tzoffsetdiff = self.tzoffsetto - self.tzoffsetfrom
+ self.isdst = isdst
+ self.tzname = tzname
+ self.rrule = rrule
+
+
+class _tzicalvtz(_tzinfo):
+ def __init__(self, tzid, comps=[]):
+ super(_tzicalvtz, self).__init__()
+
+ self._tzid = tzid
+ self._comps = comps
+ self._cachedate = []
+ self._cachecomp = []
+
+ def _find_comp(self, dt):
+ if len(self._comps) == 1:
+ return self._comps[0]
+
+ dt = dt.replace(tzinfo=None)
+
+ try:
+ return self._cachecomp[self._cachedate.index((dt, self._fold(dt)))]
+ except ValueError:
+ pass
+
+ lastcompdt = None
+ lastcomp = None
+
+ for comp in self._comps:
+ compdt = self._find_compdt(comp, dt)
+
+ if compdt and (not lastcompdt or lastcompdt < compdt):
+ lastcompdt = compdt
+ lastcomp = comp
+
+ if not lastcomp:
+ # RFC says nothing about what to do when a given
+ # time is before the first onset date. We'll look for the
+ # first standard component, or the first component, if
+ # none is found.
+ for comp in self._comps:
+ if not comp.isdst:
+ lastcomp = comp
+ break
+ else:
+ # Fall back to the first component when none is standard
+ lastcomp = self._comps[0]
+
+ self._cachedate.insert(0, (dt, self._fold(dt)))
+ self._cachecomp.insert(0, lastcomp)
+
+ if len(self._cachedate) > 10:
+ self._cachedate.pop()
+ self._cachecomp.pop()
+
+ return lastcomp
+
+ def _find_compdt(self, comp, dt):
+ if comp.tzoffsetdiff < ZERO and self._fold(dt):
+ dt -= comp.tzoffsetdiff
+
+ compdt = comp.rrule.before(dt, inc=True)
+
+ return compdt
+
+ def utcoffset(self, dt):
+ if dt is None:
+ return None
+
+ return self._find_comp(dt).tzoffsetto
+
+ def dst(self, dt):
+ comp = self._find_comp(dt)
+ if comp.isdst:
+ return comp.tzoffsetdiff
+ else:
+ return ZERO
+
+ @tzname_in_python2
+ def tzname(self, dt):
+ return self._find_comp(dt).tzname
+
+ def __repr__(self):
+ return "" % repr(self._tzid)
+
+ __reduce__ = object.__reduce__
+
+
+class tzical(object):
+ """
+ This object is designed to parse an iCalendar-style ``VTIMEZONE`` structure
+ as set out in `RFC 2445`_ Section 4.6.5 into one or more `tzinfo` objects.
+
+ :param `fileobj`:
+ A file or stream in iCalendar format, which should be UTF-8 encoded
+ with CRLF endings.
+
+ .. _`RFC 2445`: https://www.ietf.org/rfc/rfc2445.txt
+ """
+ def __init__(self, fileobj):
+ global rrule
+ from dateutil import rrule
+
+ if isinstance(fileobj, string_types):
+ self._s = fileobj
+ # ical should be encoded in UTF-8 with CRLF
+ fileobj = open(fileobj, 'r')
+ else:
+ self._s = getattr(fileobj, 'name', repr(fileobj))
+ fileobj = _ContextWrapper(fileobj)
+
+ self._vtz = {}
+
+ with fileobj as fobj:
+ self._parse_rfc(fobj.read())
+
+ def keys(self):
+ """
+ Retrieves the available time zones as a list.
+ """
+ return list(self._vtz.keys())
+
+ def get(self, tzid=None):
+ """
+ Retrieve a :py:class:`datetime.tzinfo` object by its ``tzid``.
+
+ :param tzid:
+ If there is exactly one time zone available, omitting ``tzid``
+ or passing a :py:const:`None` value returns it. Otherwise a valid
+ key (which can be retrieved from :func:`keys`) is required.
+
+ :raises ValueError:
+ Raised if ``tzid`` is not specified but there are either more
+ or fewer than 1 zone defined.
+
+ :returns:
+ Returns either a :py:class:`datetime.tzinfo` object representing
+ the relevant time zone or :py:const:`None` if the ``tzid`` was
+ not found.
+ """
+ if tzid is None:
+ if len(self._vtz) == 0:
+ raise ValueError("no timezones defined")
+ elif len(self._vtz) > 1:
+ raise ValueError("more than one timezone available")
+ tzid = next(iter(self._vtz))
+
+ return self._vtz.get(tzid)
+
+ def _parse_offset(self, s):
+ s = s.strip()
+ if not s:
+ raise ValueError("empty offset")
+ if s[0] in ('+', '-'):
+ signal = (-1, +1)[s[0] == '+']
+ s = s[1:]
+ else:
+ signal = +1
+ if len(s) == 4:
+ return (int(s[:2]) * 3600 + int(s[2:]) * 60) * signal
+ elif len(s) == 6:
+ return (int(s[:2]) * 3600 + int(s[2:4]) * 60 + int(s[4:])) * signal
+ else:
+ raise ValueError("invalid offset: " + s)
+
+ def _parse_rfc(self, s):
+ lines = s.splitlines()
+ if not lines:
+ raise ValueError("empty string")
+
+ # Unfold
+ i = 0
+ while i < len(lines):
+ line = lines[i].rstrip()
+ if not line:
+ del lines[i]
+ elif i > 0 and line[0] == " ":
+ lines[i-1] += line[1:]
+ del lines[i]
+ else:
+ i += 1
+
+ tzid = None
+ comps = []
+ invtz = False
+ comptype = None
+ for line in lines:
+ if not line:
+ continue
+ name, value = line.split(':', 1)
+ parms = name.split(';')
+ if not parms:
+ raise ValueError("empty property name")
+ name = parms[0].upper()
+ parms = parms[1:]
+ if invtz:
+ if name == "BEGIN":
+ if value in ("STANDARD", "DAYLIGHT"):
+ # Process component
+ pass
+ else:
+ raise ValueError("unknown component: "+value)
+ comptype = value
+ founddtstart = False
+ tzoffsetfrom = None
+ tzoffsetto = None
+ rrulelines = []
+ tzname = None
+ elif name == "END":
+ if value == "VTIMEZONE":
+ if comptype:
+ raise ValueError("component not closed: "+comptype)
+ if not tzid:
+ raise ValueError("mandatory TZID not found")
+ if not comps:
+ raise ValueError(
+ "at least one component is needed")
+ # Process vtimezone
+ self._vtz[tzid] = _tzicalvtz(tzid, comps)
+ invtz = False
+ elif value == comptype:
+ if not founddtstart:
+ raise ValueError("mandatory DTSTART not found")
+ if tzoffsetfrom is None:
+ raise ValueError(
+ "mandatory TZOFFSETFROM not found")
+ if tzoffsetto is None:
+ raise ValueError(
+ "mandatory TZOFFSETFROM not found")
+ # Process component
+ rr = None
+ if rrulelines:
+ rr = rrule.rrulestr("\n".join(rrulelines),
+ compatible=True,
+ ignoretz=True,
+ cache=True)
+ comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto,
+ (comptype == "DAYLIGHT"),
+ tzname, rr)
+ comps.append(comp)
+ comptype = None
+ else:
+ raise ValueError("invalid component end: "+value)
+ elif comptype:
+ if name == "DTSTART":
+ rrulelines.append(line)
+ founddtstart = True
+ elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"):
+ rrulelines.append(line)
+ elif name == "TZOFFSETFROM":
+ if parms:
+ raise ValueError(
+ "unsupported %s parm: %s " % (name, parms[0]))
+ tzoffsetfrom = self._parse_offset(value)
+ elif name == "TZOFFSETTO":
+ if parms:
+ raise ValueError(
+ "unsupported TZOFFSETTO parm: "+parms[0])
+ tzoffsetto = self._parse_offset(value)
+ elif name == "TZNAME":
+ if parms:
+ raise ValueError(
+ "unsupported TZNAME parm: "+parms[0])
+ tzname = value
+ elif name == "COMMENT":
+ pass
+ else:
+ raise ValueError("unsupported property: "+name)
+ else:
+ if name == "TZID":
+ if parms:
+ raise ValueError(
+ "unsupported TZID parm: "+parms[0])
+ tzid = value
+ elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"):
+ pass
+ else:
+ raise ValueError("unsupported property: "+name)
+ elif name == "BEGIN" and value == "VTIMEZONE":
+ tzid = None
+ comps = []
+ invtz = True
+
+ def __repr__(self):
+ return "%s(%s)" % (self.__class__.__name__, repr(self._s))
+
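+# Illustrative usage sketch (not part of the library): parsing a minimal
+# VTIMEZONE block. The TZID and component values are examples only.
+#
+#     from io import StringIO
+#     from dateutil.tz import tzical
+#     vtz = StringIO(u"BEGIN:VTIMEZONE\r\n"
+#                    u"TZID:Example/Zone\r\n"
+#                    u"BEGIN:STANDARD\r\n"
+#                    u"DTSTART:19701025T030000\r\n"
+#                    u"RRULE:FREQ=YEARLY;BYMONTH=10;BYDAY=-1SU\r\n"
+#                    u"TZOFFSETFROM:+0200\r\n"
+#                    u"TZOFFSETTO:+0100\r\n"
+#                    u"TZNAME:CET\r\n"
+#                    u"END:STANDARD\r\n"
+#                    u"END:VTIMEZONE\r\n")
+#     ical = tzical(vtz)
+#     ical.keys()     # ['Example/Zone']
+#     tz = ical.get() # the single tzinfo defined above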
+
+if sys.platform != "win32":
+ TZFILES = ["/etc/localtime", "localtime"]
+ TZPATHS = ["/usr/share/zoneinfo",
+ "/usr/lib/zoneinfo",
+ "/usr/share/lib/zoneinfo",
+ "/etc/zoneinfo"]
+else:
+ TZFILES = []
+ TZPATHS = []
+
+
+def gettz(name=None):
+ tz = None
+ if not name:
+ try:
+ name = os.environ["TZ"]
+ except KeyError:
+ pass
+ if name is None or name == ":":
+ for filepath in TZFILES:
+ if not os.path.isabs(filepath):
+ filename = filepath
+ for path in TZPATHS:
+ filepath = os.path.join(path, filename)
+ if os.path.isfile(filepath):
+ break
+ else:
+ continue
+ if os.path.isfile(filepath):
+ try:
+ tz = tzfile(filepath)
+ break
+ except (IOError, OSError, ValueError):
+ pass
+ else:
+ tz = tzlocal()
+ else:
+ if name.startswith(":"):
+ # Strip the leading ":" used in POSIX-style TZ file references
+ name = name[1:]
+ if os.path.isabs(name):
+ if os.path.isfile(name):
+ tz = tzfile(name)
+ else:
+ tz = None
+ else:
+ for path in TZPATHS:
+ filepath = os.path.join(path, name)
+ if not os.path.isfile(filepath):
+ filepath = filepath.replace(' ', '_')
+ if not os.path.isfile(filepath):
+ continue
+ try:
+ tz = tzfile(filepath)
+ break
+ except (IOError, OSError, ValueError):
+ pass
+ else:
+ tz = None
+ if tzwin is not None:
+ try:
+ tz = tzwin(name)
+ except WindowsError:
+ tz = None
+
+ if not tz:
+ from dateutil.zoneinfo import get_zonefile_instance
+ tz = get_zonefile_instance().get(name)
+
+ if not tz:
+ for c in name:
+ # name must have at least one offset to be a tzstr
+ if c in "0123456789":
+ try:
+ tz = tzstr(name)
+ except ValueError:
+ pass
+ break
+ else:
+ if name in ("GMT", "UTC"):
+ tz = tzutc()
+ elif name in time.tzname:
+ tz = tzlocal()
+ return tz
+
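+# Illustrative usage sketch (not part of the library): gettz() resolves a
+# name against the system zoneinfo directories above, then the zoneinfo
+# bundled with dateutil, and finally tries to parse it as a TZ string.
+#
+#     from dateutil import tz
+#     tz.gettz('America/New_York')   # tzfile from /usr/share/zoneinfo, if present
+#     tz.gettz('EST5EDT')            # falls back to a tzstr if no file matches
+#     tz.gettz()                     # honours $TZ, else the local system zone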
+
+def datetime_exists(dt, tz=None):
+ """
+ Given a datetime and a time zone, determine whether or not a given datetime
+ would fall in a gap.
+
+ :param dt:
+ A :class:`datetime.datetime` (whose time zone will be ignored if ``tz``
+ is provided.)
+
+ :param tz:
+ A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If
+ ``None`` or not provided, the datetime's own time zone will be used.
+
+ :return:
+ Returns a boolean value whether or not the "wall time" exists in ``tz``.
+ """
+ if tz is None:
+ if dt.tzinfo is None:
+ raise ValueError('Datetime is naive and no time zone provided.')
+ tz = dt.tzinfo
+
+ dt = dt.replace(tzinfo=None)
+
+ # This is essentially a test of whether or not the datetime can survive
+ # a round trip to UTC.
+ dt_rt = dt.replace(tzinfo=tz).astimezone(tzutc()).astimezone(tz)
+ dt_rt = dt_rt.replace(tzinfo=None)
+
+ return dt == dt_rt
+
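+# Illustrative usage sketch (not part of the library): 2:30 AM did not exist
+# on the US spring-forward date in 2017, so the round trip through UTC lands
+# on a different wall time.
+#
+#     import datetime
+#     from dateutil import tz
+#     nyc = tz.gettz('America/New_York')
+#     tz.datetime_exists(datetime.datetime(2017, 3, 12, 2, 30), tz=nyc)  # False
+#     tz.datetime_exists(datetime.datetime(2017, 3, 12, 3, 30), tz=nyc)  # True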
+
+def datetime_ambiguous(dt, tz=None):
+ """
+ Given a datetime and a time zone, determine whether or not a given datetime
+ is ambiguous (i.e if there are two times differentiated only by their DST
+ status).
+
+ :param dt:
+ A :class:`datetime.datetime` (whose time zone will be ignored if ``tz``
+ is provided.)
+
+ :param tz:
+ A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If
+ ``None`` or not provided, the datetime's own time zone will be used.
+
+ :return:
+ Returns a boolean value whether or not the "wall time" is ambiguous in
+ ``tz``.
+
+ .. versionadded:: 2.6.0
+ """
+ if tz is None:
+ if dt.tzinfo is None:
+ raise ValueError('Datetime is naive and no time zone provided.')
+
+ tz = dt.tzinfo
+
+ # If a time zone defines its own "is_ambiguous" function, we'll use that.
+ is_ambiguous_fn = getattr(tz, 'is_ambiguous', None)
+ if is_ambiguous_fn is not None:
+ try:
+ return tz.is_ambiguous(dt)
+ except:
+ pass
+
+ # If it doesn't come out and tell us it's ambiguous, we'll just check if
+ # the fold attribute has any effect on this particular date and time.
+ dt = dt.replace(tzinfo=tz)
+ wall_0 = enfold(dt, fold=0)
+ wall_1 = enfold(dt, fold=1)
+
+ same_offset = wall_0.utcoffset() == wall_1.utcoffset()
+ same_dst = wall_0.dst() == wall_1.dst()
+
+ return not (same_offset and same_dst)
+
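+# Illustrative usage sketch (not part of the library): 1:30 AM occurred twice
+# on the US fall-back date in 2017, once in EDT and once in EST.
+#
+#     import datetime
+#     from dateutil import tz
+#     nyc = tz.gettz('America/New_York')
+#     tz.datetime_ambiguous(datetime.datetime(2017, 11, 5, 1, 30), tz=nyc)  # True
+#     tz.datetime_ambiguous(datetime.datetime(2017, 11, 5, 3, 30), tz=nyc)  # False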
+
+def _datetime_to_timestamp(dt):
+ """
+ Convert a :class:`datetime.datetime` object to an epoch timestamp in seconds
+ since January 1, 1970, ignoring the time zone.
+ """
+ return _total_seconds((dt.replace(tzinfo=None) - EPOCH))
+
+
+class _ContextWrapper(object):
+ """
+ Class for wrapping contexts so that they are passed through in a
+ with statement.
+ """
+ def __init__(self, context):
+ self.context = context
+
+ def __enter__(self):
+ return self.context
+
+ def __exit__(*args, **kwargs):
+ pass
+
+# vim:ts=4:sw=4:et
diff --git a/appWSM/lib/python2.7/site-packages/dateutil/tz/win.py b/appWSM/lib/python2.7/site-packages/dateutil/tz/win.py
new file mode 100644
index 0000000000000000000000000000000000000000..36a1c263ac62b93add8f4be90874014a5b4e3c75
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/dateutil/tz/win.py
@@ -0,0 +1,332 @@
+# This code was originally contributed by Jeffrey Harris.
+import datetime
+import struct
+
+from six.moves import winreg
+from six import text_type
+
+try:
+ import ctypes
+ from ctypes import wintypes
+except ValueError:
+ # ValueError is raised on non-Windows systems for some horrible reason.
+ raise ImportError("Running tzwin on non-Windows system")
+
+from ._common import tzrangebase
+
+__all__ = ["tzwin", "tzwinlocal", "tzres"]
+
+ONEWEEK = datetime.timedelta(7)
+
+TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones"
+TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones"
+TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation"
+
+
+def _settzkeyname():
+ handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
+ try:
+ winreg.OpenKey(handle, TZKEYNAMENT).Close()
+ TZKEYNAME = TZKEYNAMENT
+ except WindowsError:
+ TZKEYNAME = TZKEYNAME9X
+ handle.Close()
+ return TZKEYNAME
+
+
+TZKEYNAME = _settzkeyname()
+
+
+class tzres(object):
+ """
+ Class for accessing `tzres.dll`, which contains timezone name related
+ resources.
+
+ .. versionadded:: 2.5.0
+ """
+ p_wchar = ctypes.POINTER(wintypes.WCHAR) # Pointer to a wide char
+
+ def __init__(self, tzres_loc='tzres.dll'):
+ # Load the user32 DLL so we can load strings from tzres
+ user32 = ctypes.WinDLL('user32')
+
+ # Specify the LoadStringW function
+ user32.LoadStringW.argtypes = (wintypes.HINSTANCE,
+ wintypes.UINT,
+ wintypes.LPWSTR,
+ ctypes.c_int)
+
+ self.LoadStringW = user32.LoadStringW
+ self._tzres = ctypes.WinDLL(tzres_loc)
+ self.tzres_loc = tzres_loc
+
+ def load_name(self, offset):
+ """
+ Load a timezone name from a DLL offset (integer).
+
+ >>> from dateutil.tzwin import tzres
+ >>> tzr = tzres()
+ >>> print(tzr.load_name(112))
+ 'Eastern Standard Time'
+
+ :param offset:
+ A positive integer value referring to a string from the tzres dll.
+
+ .. note::
+ Offsets found in the registry are generally of the form
+ `@tzres.dll,-114`. The offset in this case is 114, not -114.
+
+ """
+ resource = self.p_wchar()
+ lpBuffer = ctypes.cast(ctypes.byref(resource), wintypes.LPWSTR)
+ nchar = self.LoadStringW(self._tzres._handle, offset, lpBuffer, 0)
+ return resource[:nchar]
+
+ def name_from_string(self, tzname_str):
+ """
+ Parse strings as returned from the Windows registry into the time zone
+ name as defined in the registry.
+
+ >>> from dateutil.tzwin import tzres
+ >>> tzr = tzres()
+ >>> print(tzr.name_from_string('@tzres.dll,-251'))
+ 'Dateline Daylight Time'
+ >>> print(tzr.name_from_string('Eastern Standard Time'))
+ 'Eastern Standard Time'
+
+ :param tzname_str:
+ A timezone name string as returned from a Windows registry key.
+
+ :return:
+ Returns the localized timezone string from tzres.dll if the string
+ is of the form `@tzres.dll,-offset`, else returns the input string.
+ """
+ if not tzname_str.startswith('@'):
+ return tzname_str
+
+ name_splt = tzname_str.split(',-')
+ try:
+ offset = int(name_splt[1])
+ except:
+ raise ValueError("Malformed timezone string.")
+
+ return self.load_name(offset)
+
+
+class tzwinbase(tzrangebase):
+ """tzinfo class based on win32's timezones available in the registry."""
+ def __init__(self):
+ raise NotImplementedError('tzwinbase is an abstract base class')
+
+ def __eq__(self, other):
+ # Compare on all relevant dimensions, including name.
+ if not isinstance(other, tzwinbase):
+ return NotImplemented
+
+ return (self._std_offset == other._std_offset and
+ self._dst_offset == other._dst_offset and
+ self._stddayofweek == other._stddayofweek and
+ self._dstdayofweek == other._dstdayofweek and
+ self._stdweeknumber == other._stdweeknumber and
+ self._dstweeknumber == other._dstweeknumber and
+ self._stdhour == other._stdhour and
+ self._dsthour == other._dsthour and
+ self._stdminute == other._stdminute and
+ self._dstminute == other._dstminute and
+ self._std_abbr == other._std_abbr and
+ self._dst_abbr == other._dst_abbr)
+
+ @staticmethod
+ def list():
+ """Return a list of all time zones known to the system."""
+ with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
+ with winreg.OpenKey(handle, TZKEYNAME) as tzkey:
+ result = [winreg.EnumKey(tzkey, i)
+ for i in range(winreg.QueryInfoKey(tzkey)[0])]
+ return result
+
+ def display(self):
+ return self._display
+
+ def transitions(self, year):
+ """
+ For a given year, get the DST on and off transition times, expressed
+ always on the standard time side. For zones with no transitions, this
+ function returns ``None``.
+
+ :param year:
+ The year whose transitions you would like to query.
+
+ :return:
+ Returns a :class:`tuple` of :class:`datetime.datetime` objects,
+ ``(dston, dstoff)`` for zones with an annual DST transition, or
+ ``None`` for fixed offset zones.
+ """
+
+ if not self.hasdst:
+ return None
+
+ dston = picknthweekday(year, self._dstmonth, self._dstdayofweek,
+ self._dsthour, self._dstminute,
+ self._dstweeknumber)
+
+ dstoff = picknthweekday(year, self._stdmonth, self._stddayofweek,
+ self._stdhour, self._stdminute,
+ self._stdweeknumber)
+
+ # Ambiguous dates default to the STD side
+ dstoff -= self._dst_base_offset
+
+ return dston, dstoff
+
+ def _get_hasdst(self):
+ return self._dstmonth != 0
+
+ @property
+ def _dst_base_offset(self):
+ return self._dst_base_offset_
+
+
+class tzwin(tzwinbase):
+
+ def __init__(self, name):
+ self._name = name
+
+ # multiple contexts only possible in 2.7 and 3.1, we still support 2.6
+ with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
+ tzkeyname = text_type("{kn}\\{name}").format(kn=TZKEYNAME, name=name)
+ with winreg.OpenKey(handle, tzkeyname) as tzkey:
+ keydict = valuestodict(tzkey)
+
+ self._std_abbr = keydict["Std"]
+ self._dst_abbr = keydict["Dlt"]
+
+ self._display = keydict["Display"]
+
+ # See http://ww1.jsiinc.com/SUBA/tip0300/rh0398.htm
+ tup = struct.unpack("=3l16h", keydict["TZI"])
+ stdoffset = -tup[0]-tup[1] # Bias + StandardBias * -1
+ dstoffset = stdoffset-tup[2] # + DaylightBias * -1
+ self._std_offset = datetime.timedelta(minutes=stdoffset)
+ self._dst_offset = datetime.timedelta(minutes=dstoffset)
+
+ # for the meaning see the win32 TIME_ZONE_INFORMATION structure docs
+ # http://msdn.microsoft.com/en-us/library/windows/desktop/ms725481(v=vs.85).aspx
+ (self._stdmonth,
+ self._stddayofweek, # Sunday = 0
+ self._stdweeknumber, # Last = 5
+ self._stdhour,
+ self._stdminute) = tup[4:9]
+
+ (self._dstmonth,
+ self._dstdayofweek, # Sunday = 0
+ self._dstweeknumber, # Last = 5
+ self._dsthour,
+ self._dstminute) = tup[12:17]
+
+ self._dst_base_offset_ = self._dst_offset - self._std_offset
+ self.hasdst = self._get_hasdst()
+
+ def __repr__(self):
+ return "tzwin(%s)" % repr(self._name)
+
+ def __reduce__(self):
+ return (self.__class__, (self._name,))
+
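+# Illustrative usage sketch (not part of the library; Windows only): the key
+# name must match one of the registry entries returned by tzwinbase.list().
+#
+#     from dateutil.tz.win import tzwin
+#     eastern = tzwin('Eastern Standard Time')
+#     eastern.display()        # e.g. '(UTC-05:00) Eastern Time (US & Canada)'
+#     eastern.transitions(2017)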
+
+class tzwinlocal(tzwinbase):
+ def __init__(self):
+ with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
+ with winreg.OpenKey(handle, TZLOCALKEYNAME) as tzlocalkey:
+ keydict = valuestodict(tzlocalkey)
+
+ self._std_abbr = keydict["StandardName"]
+ self._dst_abbr = keydict["DaylightName"]
+
+ try:
+ tzkeyname = text_type('{kn}\\{sn}').format(kn=TZKEYNAME,
+ sn=self._std_abbr)
+ with winreg.OpenKey(handle, tzkeyname) as tzkey:
+ _keydict = valuestodict(tzkey)
+ self._display = _keydict["Display"]
+ except OSError:
+ self._display = None
+
+ stdoffset = -keydict["Bias"]-keydict["StandardBias"]
+ dstoffset = stdoffset-keydict["DaylightBias"]
+
+ self._std_offset = datetime.timedelta(minutes=stdoffset)
+ self._dst_offset = datetime.timedelta(minutes=dstoffset)
+
+ # For reasons unclear, in this particular key, the day of week has been
+ # moved to the END of the SYSTEMTIME structure.
+ tup = struct.unpack("=8h", keydict["StandardStart"])
+
+ (self._stdmonth,
+ self._stdweeknumber, # Last = 5
+ self._stdhour,
+ self._stdminute) = tup[1:5]
+
+ self._stddayofweek = tup[7]
+
+ tup = struct.unpack("=8h", keydict["DaylightStart"])
+
+ (self._dstmonth,
+ self._dstweeknumber, # Last = 5
+ self._dsthour,
+ self._dstminute) = tup[1:5]
+
+ self._dstdayofweek = tup[7]
+
+ self._dst_base_offset_ = self._dst_offset - self._std_offset
+ self.hasdst = self._get_hasdst()
+
+ def __repr__(self):
+ return "tzwinlocal()"
+
+ def __str__(self):
+ # str will return the standard name, not the daylight name.
+ return "tzwinlocal(%s)" % repr(self._std_abbr)
+
+ def __reduce__(self):
+ return (self.__class__, ())
+
+
+def picknthweekday(year, month, dayofweek, hour, minute, whichweek):
+ """ dayofweek == 0 means Sunday, whichweek 5 means last instance """
+ first = datetime.datetime(year, month, 1, hour, minute)
+
+ # This will work if dayofweek is ISO weekday (1-7) or Microsoft-style (0-6),
+ # Because 7 % 7 = 0
+ weekdayone = first.replace(day=((dayofweek - first.isoweekday()) % 7) + 1)
+ wd = weekdayone + ((whichweek - 1) * ONEWEEK)
+ if (wd.month != month):
+ wd -= ONEWEEK
+
+ return wd
+
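+# Worked example (comment only): the second Sunday of March 2017 at 02:00.
+# dayofweek=0 is Sunday in this scheme, whichweek=2 selects the second one.
+#
+#     picknthweekday(2017, 3, 0, 2, 0, 2)
+#     # -> datetime.datetime(2017, 3, 12, 2, 0)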
+
+def valuestodict(key):
+ """Convert a registry key's values to a dictionary."""
+ dout = {}
+ size = winreg.QueryInfoKey(key)[1]
+ tz_res = None
+
+ for i in range(size):
+ key_name, value, dtype = winreg.EnumValue(key, i)
+ if dtype == winreg.REG_DWORD or dtype == winreg.REG_DWORD_LITTLE_ENDIAN:
+ # If it's a DWORD (32-bit integer), it's stored as unsigned - convert
+ # that to a proper signed integer
+ if value & (1 << 31):
+ value = value - (1 << 32)
+ elif dtype == winreg.REG_SZ:
+ # If it's a reference to the tzres DLL, load the actual string
+ if value.startswith('@tzres'):
+ tz_res = tz_res or tzres()
+ value = tz_res.name_from_string(value)
+
+ value = value.rstrip('\x00') # Remove trailing nulls
+
+ dout[key_name] = value
+
+ return dout
diff --git a/appWSM/lib/python2.7/site-packages/dateutil/tzwin.py b/appWSM/lib/python2.7/site-packages/dateutil/tzwin.py
new file mode 100644
index 0000000000000000000000000000000000000000..cebc673e40fc376653ebf037e96f0a6d0b33e906
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/dateutil/tzwin.py
@@ -0,0 +1,2 @@
+# tzwin has moved to dateutil.tz.win
+from .tz.win import *
diff --git a/appWSM/lib/python2.7/site-packages/dateutil/zoneinfo/__init__.py b/appWSM/lib/python2.7/site-packages/dateutil/zoneinfo/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a2ed4f9d61474af7115b4a92329de3c6fbe6487a
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/dateutil/zoneinfo/__init__.py
@@ -0,0 +1,183 @@
+# -*- coding: utf-8 -*-
+import warnings
+import json
+
+from tarfile import TarFile
+from pkgutil import get_data
+from io import BytesIO
+from contextlib import closing
+
+from dateutil.tz import tzfile
+
+__all__ = ["get_zonefile_instance", "gettz", "gettz_db_metadata", "rebuild"]
+
+ZONEFILENAME = "dateutil-zoneinfo.tar.gz"
+METADATA_FN = 'METADATA'
+
+# python2.6 compatibility. Note that TarFile.__exit__ != TarFile.close, but
+# it's close enough for python2.6
+tar_open = TarFile.open
+if not hasattr(TarFile, '__exit__'):
+ def tar_open(*args, **kwargs):
+ return closing(TarFile.open(*args, **kwargs))
+
+
+class tzfile(tzfile):
+ def __reduce__(self):
+ return (gettz, (self._filename,))
+
+
+def getzoneinfofile_stream():
+ try:
+ return BytesIO(get_data(__name__, ZONEFILENAME))
+ except IOError as e: # TODO switch to FileNotFoundError?
+ warnings.warn("I/O error({0}): {1}".format(e.errno, e.strerror))
+ return None
+
+
+class ZoneInfoFile(object):
+ def __init__(self, zonefile_stream=None):
+ if zonefile_stream is not None:
+ with tar_open(fileobj=zonefile_stream, mode='r') as tf:
+ # dict comprehension does not work on python2.6
+ # TODO: get back to the nicer syntax when we ditch python2.6
+ # self.zones = {zf.name: tzfile(tf.extractfile(zf),
+ # filename = zf.name)
+ # for zf in tf.getmembers() if zf.isfile()}
+ self.zones = dict((zf.name, tzfile(tf.extractfile(zf),
+ filename=zf.name))
+ for zf in tf.getmembers()
+ if zf.isfile() and zf.name != METADATA_FN)
+ # deal with links: They'll point to their parent object. Less
+ # waste of memory
+ # links = {zl.name: self.zones[zl.linkname]
+ # for zl in tf.getmembers() if zl.islnk() or zl.issym()}
+ links = dict((zl.name, self.zones[zl.linkname])
+ for zl in tf.getmembers() if
+ zl.islnk() or zl.issym())
+ self.zones.update(links)
+ try:
+ metadata_json = tf.extractfile(tf.getmember(METADATA_FN))
+ metadata_str = metadata_json.read().decode('UTF-8')
+ self.metadata = json.loads(metadata_str)
+ except KeyError:
+ # no metadata in tar file
+ self.metadata = None
+ else:
+ self.zones = dict()
+ self.metadata = None
+
+ def get(self, name, default=None):
+ """
+ Wrapper for :func:`ZoneInfoFile.zones.get`. This is a convenience method
+ for retrieving zones from the zone dictionary.
+
+ :param name:
+ The name of the zone to retrieve. (Generally IANA zone names)
+
+ :param default:
+ The value to return in the event of a missing key.
+
+ .. versionadded:: 2.6.0
+
+ """
+ return self.zones.get(name, default)
+
+
+# The current API has gettz as a module function, although in fact it taps into
+# a stateful class. So as a workaround for now, without changing the API, we
+# will create a new "global" class instance the first time a user requests a
+# timezone. Ugly, but adheres to the api.
+#
+# TODO: Remove after deprecation period.
+_CLASS_ZONE_INSTANCE = list()
+
+
+def get_zonefile_instance(new_instance=False):
+ """
+ This is a convenience function which provides a :class:`ZoneInfoFile`
+ instance using the data provided by the ``dateutil`` package. By default, it
+ caches a single instance of the ZoneInfoFile object and returns that.
+
+ :param new_instance:
+ If ``True``, a new instance of :class:`ZoneInfoFile` is instantiated and
+ used as the cached instance for the next call. Otherwise, new instances
+ are created only as necessary.
+
+ :return:
+ Returns a :class:`ZoneInfoFile` object.
+
+ .. versionadded:: 2.6
+ """
+ if new_instance:
+ zif = None
+ else:
+ zif = getattr(get_zonefile_instance, '_cached_instance', None)
+
+ if zif is None:
+ zif = ZoneInfoFile(getzoneinfofile_stream())
+
+ get_zonefile_instance._cached_instance = zif
+
+ return zif
+
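+# Illustrative usage sketch (not part of the library): fetching a zone from
+# the bundled tarball without going through the deprecated module-level
+# gettz() below.
+#
+#     from dateutil.zoneinfo import get_zonefile_instance
+#     zonefile = get_zonefile_instance()
+#     nyc = zonefile.get('America/New_York')   # None if the name is unknown
+#     sorted(zonefile.zones.keys())[:3]        # browse the available names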
+
+def gettz(name):
+ """
+ This retrieves a time zone from the local zoneinfo tarball that is packaged
+ with dateutil.
+
+ :param name:
+ An IANA-style time zone name, as found in the zoneinfo file.
+
+ :return:
+ Returns a :class:`dateutil.tz.tzfile` time zone object.
+
+ .. warning::
+ It is generally inadvisable to use this function, and it is only
+ provided for API compatibility with earlier versions. This is *not*
+ equivalent to ``dateutil.tz.gettz()``, which selects an appropriate
+ time zone based on the inputs, favoring system zoneinfo. This is ONLY
+ for accessing the dateutil-specific zoneinfo (which may be out of
+ date compared to the system zoneinfo).
+
+ .. deprecated:: 2.6
+ If you need to use a specific zoneinfofile over the system zoneinfo,
+ instantiate a :class:`dateutil.zoneinfo.ZoneInfoFile` object and call
+ :func:`dateutil.zoneinfo.ZoneInfoFile.get(name)` instead.
+
+ Use :func:`get_zonefile_instance` to retrieve an instance of the
+ dateutil-provided zoneinfo.
+ """
+ warnings.warn("zoneinfo.gettz() will be removed in future versions, "
+ "to use the dateutil-provided zoneinfo files, instantiate a "
+ "ZoneInfoFile object and use ZoneInfoFile.zones.get() "
+ "instead. See the documentation for details.",
+ DeprecationWarning)
+
+ if len(_CLASS_ZONE_INSTANCE) == 0:
+ _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))
+ return _CLASS_ZONE_INSTANCE[0].zones.get(name)
+
+
+def gettz_db_metadata():
+ """ Get the zonefile metadata
+
+ See `zonefile_metadata`_
+
+ :returns:
+ A dictionary with the database metadata
+
+ .. deprecated:: 2.6
+ See deprecation warning in :func:`zoneinfo.gettz`. To get metadata,
+ query the attribute ``zoneinfo.ZoneInfoFile.metadata``.
+ """
+ warnings.warn("zoneinfo.gettz_db_metadata() will be removed in future "
+ "versions, to use the dateutil-provided zoneinfo files, "
+ "ZoneInfoFile object and query the 'metadata' attribute "
+ "instead. See the documentation for details.",
+ DeprecationWarning)
+
+ if len(_CLASS_ZONE_INSTANCE) == 0:
+ _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))
+ return _CLASS_ZONE_INSTANCE[0].metadata
diff --git a/appWSM/lib/python2.7/site-packages/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz b/appWSM/lib/python2.7/site-packages/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz
new file mode 100644
index 0000000000000000000000000000000000000000..613c0ff3b4ef910935629a7d145354f40314150c
Binary files /dev/null and b/appWSM/lib/python2.7/site-packages/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz differ
diff --git a/appWSM/lib/python2.7/site-packages/dateutil/zoneinfo/rebuild.py b/appWSM/lib/python2.7/site-packages/dateutil/zoneinfo/rebuild.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d53bb8807c46aef8aa18125d955e04d39456fe1
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/dateutil/zoneinfo/rebuild.py
@@ -0,0 +1,52 @@
+import logging
+import os
+import tempfile
+import shutil
+import json
+from subprocess import check_call
+
+from dateutil.zoneinfo import tar_open, METADATA_FN, ZONEFILENAME
+
+
+def rebuild(filename, tag=None, format="gz", zonegroups=[], metadata=None):
+ """Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar*
+
+ filename is the timezone tarball from ftp.iana.org/tz.
+
+ """
+ tmpdir = tempfile.mkdtemp()
+ zonedir = os.path.join(tmpdir, "zoneinfo")
+ moduledir = os.path.dirname(__file__)
+ try:
+ with tar_open(filename) as tf:
+ for name in zonegroups:
+ tf.extract(name, tmpdir)
+ filepaths = [os.path.join(tmpdir, n) for n in zonegroups]
+ try:
+ check_call(["zic", "-d", zonedir] + filepaths)
+ except OSError as e:
+ _print_on_nosuchfile(e)
+ raise
+ # write metadata file
+ with open(os.path.join(zonedir, METADATA_FN), 'w') as f:
+ json.dump(metadata, f, indent=4, sort_keys=True)
+ target = os.path.join(moduledir, ZONEFILENAME)
+ with tar_open(target, "w:%s" % format) as tf:
+ for entry in os.listdir(zonedir):
+ entrypath = os.path.join(zonedir, entry)
+ tf.add(entrypath, entry)
+ finally:
+ shutil.rmtree(tmpdir)
+
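+# Illustrative usage sketch (not part of the library): rebuilding the bundled
+# tarball from an IANA release. The filename, zone groups and metadata shown
+# here are assumptions; zic(8) must be installed and on the PATH.
+#
+#     from dateutil.zoneinfo.rebuild import rebuild
+#     rebuild("tzdata2017b.tar.gz", tag="2017b",
+#             zonegroups=["europe", "northamerica", "southamerica",
+#                         "asia", "africa", "australasia", "antarctica",
+#                         "etcetera", "backward"],
+#             metadata={"tzversion": "2017b"})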
+
+def _print_on_nosuchfile(e):
+ """Print helpful troubleshooting message
+
+ e is an exception raised by subprocess.check_call()
+
+ """
+ if e.errno == 2:
+ logging.error(
+ "Could not find zic. Perhaps you need to install "
+ "libc-bin or some other package that provides it, "
+ "or it's not in your PATH?")
diff --git a/appWSM/lib/python2.7/site-packages/easy_install.py b/appWSM/lib/python2.7/site-packages/easy_install.py
new file mode 100644
index 0000000000000000000000000000000000000000..d87e984034b6e6e9eb456ebcb2b3f420c07a48bc
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/easy_install.py
@@ -0,0 +1,5 @@
+"""Run the EasyInstall command"""
+
+if __name__ == '__main__':
+ from setuptools.command.easy_install import main
+ main()
diff --git a/appWSM/lib/python2.7/site-packages/flask/__init__.py b/appWSM/lib/python2.7/site-packages/flask/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9a873facd9f3d030e9ff66a7a019591d9317dd3
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/flask/__init__.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+"""
+ flask
+ ~~~~~
+
+ A microframework based on Werkzeug. It's extensively documented
+ and follows best practice patterns.
+
+ :copyright: (c) 2015 by Armin Ronacher.
+ :license: BSD, see LICENSE for more details.
+"""
+
+__version__ = '0.12.2'
+
+# utilities we import from Werkzeug and Jinja2 that are unused
+# in the module but are exported as public interface.
+from werkzeug.exceptions import abort
+from werkzeug.utils import redirect
+from jinja2 import Markup, escape
+
+from .app import Flask, Request, Response
+from .config import Config
+from .helpers import url_for, flash, send_file, send_from_directory, \
+ get_flashed_messages, get_template_attribute, make_response, safe_join, \
+ stream_with_context
+from .globals import current_app, g, request, session, _request_ctx_stack, \
+ _app_ctx_stack
+from .ctx import has_request_context, has_app_context, \
+ after_this_request, copy_current_request_context
+from .blueprints import Blueprint
+from .templating import render_template, render_template_string
+
+# the signals
+from .signals import signals_available, template_rendered, request_started, \
+ request_finished, got_request_exception, request_tearing_down, \
+ appcontext_tearing_down, appcontext_pushed, \
+ appcontext_popped, message_flashed, before_render_template
+
+# We're not exposing the actual json module but a convenient wrapper around
+# it.
+from . import json
+
+# This was the only thing that Flask used to export at one point and it had
+# a more generic name.
+jsonify = json.jsonify
+
+# backwards compat, goes away in 1.0
+from .sessions import SecureCookieSession as Session
+json_available = True
diff --git a/appWSM/lib/python2.7/site-packages/flask/__main__.py b/appWSM/lib/python2.7/site-packages/flask/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..cbefccd27d24c8e7de6b340126af70315b3dd235
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/flask/__main__.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+"""
+ flask.__main__
+ ~~~~~~~~~~~~~~
+
+ Alias for flask.run for the command line.
+
+ :copyright: (c) 2015 by Armin Ronacher.
+ :license: BSD, see LICENSE for more details.
+"""
+
+
+if __name__ == '__main__':
+ from .cli import main
+ main(as_module=True)
diff --git a/appWSM/lib/python2.7/site-packages/flask/_compat.py b/appWSM/lib/python2.7/site-packages/flask/_compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..071628fc1148e435b70084d31216605631c2d2e7
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/flask/_compat.py
@@ -0,0 +1,96 @@
+# -*- coding: utf-8 -*-
+"""
+ flask._compat
+ ~~~~~~~~~~~~~
+
+ Some py2/py3 compatibility support based on a stripped down
+ version of six so we don't have to depend on a specific version
+ of it.
+
+ :copyright: (c) 2015 by Armin Ronacher.
+ :license: BSD, see LICENSE for more details.
+"""
+import sys
+
+PY2 = sys.version_info[0] == 2
+_identity = lambda x: x
+
+
+if not PY2:
+ text_type = str
+ string_types = (str,)
+ integer_types = (int,)
+
+ iterkeys = lambda d: iter(d.keys())
+ itervalues = lambda d: iter(d.values())
+ iteritems = lambda d: iter(d.items())
+
+ from io import StringIO
+
+ def reraise(tp, value, tb=None):
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+
+ implements_to_string = _identity
+
+else:
+ text_type = unicode
+ string_types = (str, unicode)
+ integer_types = (int, long)
+
+ iterkeys = lambda d: d.iterkeys()
+ itervalues = lambda d: d.itervalues()
+ iteritems = lambda d: d.iteritems()
+
+ from cStringIO import StringIO
+
+ exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
+
+ def implements_to_string(cls):
+ cls.__unicode__ = cls.__str__
+ cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
+ return cls
+
+
+def with_metaclass(meta, *bases):
+ """Create a base class with a metaclass."""
+ # This requires a bit of explanation: the basic idea is to make a
+ # dummy metaclass for one level of class instantiation that replaces
+ # itself with the actual metaclass.
+ class metaclass(type):
+ def __new__(cls, name, this_bases, d):
+ return meta(name, bases, d)
+ return type.__new__(metaclass, 'temporary_class', (), {})
+
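+# Illustrative usage sketch (not part of Flask): with_metaclass lets the same
+# class statement work under both Python 2 and Python 3 metaclass syntax. The
+# metaclass below is hypothetical.
+#
+#     class ModelMeta(type):
+#         def __new__(mcs, name, bases, d):
+#             d.setdefault('fields', [])
+#             return type.__new__(mcs, name, bases, d)
+#
+#     class Model(with_metaclass(ModelMeta, object)):
+#         pass
+#
+#     Model.fields   # [] -- injected by ModelMeta during class creation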
+
+# Certain versions of pypy have a bug where clearing the exception stack
+# breaks the __exit__ function in a very peculiar way. The second level of
+# exception blocks is necessary because pypy seems to forget to check if an
+# exception happened until the next bytecode instruction?
+#
+# Relevant PyPy bugfix commit:
+# https://bitbucket.org/pypy/pypy/commits/77ecf91c635a287e88e60d8ddb0f4e9df4003301
+# According to ronan on #pypy IRC, it is released in PyPy2 2.3 and later
+# versions.
+#
+# Ubuntu 14.04 has PyPy 2.2.1, which does exhibit this bug.
+BROKEN_PYPY_CTXMGR_EXIT = False
+if hasattr(sys, 'pypy_version_info'):
+ class _Mgr(object):
+ def __enter__(self):
+ return self
+ def __exit__(self, *args):
+ if hasattr(sys, 'exc_clear'):
+ # Python 3 (PyPy3) doesn't have exc_clear
+ sys.exc_clear()
+ try:
+ try:
+ with _Mgr():
+ raise AssertionError()
+ except:
+ raise
+ except TypeError:
+ BROKEN_PYPY_CTXMGR_EXIT = True
+ except AssertionError:
+ pass
diff --git a/appWSM/lib/python2.7/site-packages/flask/app.py b/appWSM/lib/python2.7/site-packages/flask/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..1404e17e74820a5909d4e0663a9889f814c82ed3
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/flask/app.py
@@ -0,0 +1,2003 @@
+# -*- coding: utf-8 -*-
+"""
+ flask.app
+ ~~~~~~~~~
+
+ This module implements the central WSGI application object.
+
+ :copyright: (c) 2015 by Armin Ronacher.
+ :license: BSD, see LICENSE for more details.
+"""
+import os
+import sys
+from threading import Lock
+from datetime import timedelta
+from itertools import chain
+from functools import update_wrapper
+
+from werkzeug.datastructures import ImmutableDict
+from werkzeug.routing import Map, Rule, RequestRedirect, BuildError
+from werkzeug.exceptions import HTTPException, InternalServerError, \
+ MethodNotAllowed, BadRequest, default_exceptions
+
+from .helpers import _PackageBoundObject, url_for, get_flashed_messages, \
+ locked_cached_property, _endpoint_from_view_func, find_package, \
+ get_debug_flag
+from . import json, cli
+from .wrappers import Request, Response
+from .config import ConfigAttribute, Config
+from .ctx import RequestContext, AppContext, _AppCtxGlobals
+from .globals import _request_ctx_stack, request, session, g
+from .sessions import SecureCookieSessionInterface
+from .templating import DispatchingJinjaLoader, Environment, \
+ _default_template_ctx_processor
+from .signals import request_started, request_finished, got_request_exception, \
+ request_tearing_down, appcontext_tearing_down
+from ._compat import reraise, string_types, text_type, integer_types
+
+# a lock used for logger initialization
+_logger_lock = Lock()
+
+# a singleton sentinel value for parameter defaults
+_sentinel = object()
+
+
+def _make_timedelta(value):
+ if not isinstance(value, timedelta):
+ return timedelta(seconds=value)
+ return value
+
+
+def setupmethod(f):
+ """Wraps a method so that it performs a check in debug mode if the
+ first request was already handled.
+ """
+ def wrapper_func(self, *args, **kwargs):
+ if self.debug and self._got_first_request:
+ raise AssertionError('A setup function was called after the '
+ 'first request was handled. This usually indicates a bug '
+ 'in the application where a module was not imported '
+ 'and decorators or other functionality was called too late.\n'
+ 'To fix this make sure to import all your view modules, '
+ 'database models and everything related at a central place '
+ 'before the application starts serving requests.')
+ return f(self, *args, **kwargs)
+ return update_wrapper(wrapper_func, f)
+
+
+class Flask(_PackageBoundObject):
+ """The flask object implements a WSGI application and acts as the central
+ object. It is passed the name of the module or package of the
+ application. Once it is created it will act as a central registry for
+ the view functions, the URL rules, template configuration and much more.
+
+ The name of the package is used to resolve resources from inside the
+ package or the folder the module is contained in depending on if the
+ package parameter resolves to an actual python package (a folder with
+ an :file:`__init__.py` file inside) or a standard module (just a ``.py`` file).
+
+ For more information about resource loading, see :func:`open_resource`.
+
+ Usually you create a :class:`Flask` instance in your main module or
+ in the :file:`__init__.py` file of your package like this::
+
+ from flask import Flask
+ app = Flask(__name__)
+
+ .. admonition:: About the First Parameter
+
+ The idea of the first parameter is to give Flask an idea of what
+ belongs to your application. This name is used to find resources
+ on the filesystem, can be used by extensions to improve debugging
+ information and a lot more.
+
+ So it's important what you provide there. If you are using a single
+ module, `__name__` is always the correct value. If you however are
+ using a package, it's usually recommended to hardcode the name of
+ your package there.
+
+ For example if your application is defined in :file:`yourapplication/app.py`
+ you should create it with one of the two versions below::
+
+ app = Flask('yourapplication')
+ app = Flask(__name__.split('.')[0])
+
+ Why is that? The application will work even with `__name__`, thanks
+ to how resources are looked up. However it will make debugging more
+ painful. Certain extensions can make assumptions based on the
+ import name of your application. For example the Flask-SQLAlchemy
+ extension will look for the code in your application that triggered
+ an SQL query in debug mode. If the import name is not properly set
+ up, that debugging information is lost. (For example it would only
+ pick up SQL queries in `yourapplication.app` and not
+ `yourapplication.views.frontend`)
+
+ .. versionadded:: 0.7
+ The `static_url_path`, `static_folder`, and `template_folder`
+ parameters were added.
+
+ .. versionadded:: 0.8
+ The `instance_path` and `instance_relative_config` parameters were
+ added.
+
+ .. versionadded:: 0.11
+ The `root_path` parameter was added.
+
+ :param import_name: the name of the application package
+ :param static_url_path: can be used to specify a different path for the
+ static files on the web. Defaults to the name
+ of the `static_folder` folder.
+ :param static_folder: the folder with static files that should be served
+ at `static_url_path`. Defaults to the ``'static'``
+ folder in the root path of the application.
+ :param template_folder: the folder that contains the templates that should
+ be used by the application. Defaults to
+ ``'templates'`` folder in the root path of the
+ application.
+ :param instance_path: An alternative instance path for the application.
+ By default the folder ``'instance'`` next to the
+ package or module is assumed to be the instance
+ path.
+ :param instance_relative_config: if set to ``True`` relative filenames
+ for loading the config are assumed to
+ be relative to the instance path instead
+ of the application root.
+ :param root_path: Flask by default will automatically calculate the path
+ to the root of the application. In certain situations
+ this cannot be achieved (for instance if the package
+ is a Python 3 namespace package) and needs to be
+ manually defined.
+ """
+
+ #: The class that is used for request objects. See :class:`~flask.Request`
+ #: for more information.
+ request_class = Request
+
+ #: The class that is used for response objects. See
+ #: :class:`~flask.Response` for more information.
+ response_class = Response
+
+ #: The class that is used for the Jinja environment.
+ #:
+ #: .. versionadded:: 0.11
+ jinja_environment = Environment
+
+ #: The class that is used for the :data:`~flask.g` instance.
+ #:
+ #: Example use cases for a custom class:
+ #:
+ #: 1. Store arbitrary attributes on flask.g.
+ #: 2. Add a property for lazy per-request database connectors.
+ #: 3. Return None instead of AttributeError on unexpected attributes.
+ #: 4. Raise an exception if an unexpected attribute is set, i.e. a "controlled" flask.g.
+ #:
+ #: In Flask 0.9 this property was called `request_globals_class` but it
+ #: was changed in 0.10 to :attr:`app_ctx_globals_class` because the
+ #: flask.g object is now application context scoped.
+ #:
+ #: .. versionadded:: 0.10
+ app_ctx_globals_class = _AppCtxGlobals
+
+ # Backwards compatibility support
+ def _get_request_globals_class(self):
+ return self.app_ctx_globals_class
+ def _set_request_globals_class(self, value):
+ from warnings import warn
+ warn(DeprecationWarning('request_globals_class attribute is now '
+ 'called app_ctx_globals_class'))
+ self.app_ctx_globals_class = value
+ request_globals_class = property(_get_request_globals_class,
+ _set_request_globals_class)
+ del _get_request_globals_class, _set_request_globals_class
+
+ #: The class that is used for the ``config`` attribute of this app.
+ #: Defaults to :class:`~flask.Config`.
+ #:
+ #: Example use cases for a custom class:
+ #:
+ #: 1. Default values for certain config options.
+ #: 2. Access to config values through attributes in addition to keys.
+ #:
+ #: .. versionadded:: 0.11
+ config_class = Config
+
+ #: The debug flag. Set this to ``True`` to enable debugging of the
+ #: application. In debug mode the debugger will kick in when an unhandled
+ #: exception occurs and the integrated server will automatically reload
+ #: the application if changes in the code are detected.
+ #:
+ #: This attribute can also be configured from the config with the ``DEBUG``
+ #: configuration key. Defaults to ``False``.
+ debug = ConfigAttribute('DEBUG')
+
+ #: The testing flag. Set this to ``True`` to enable the test mode of
+ #: Flask extensions (and in the future probably also Flask itself).
+ #: For example this might activate unittest helpers that have an
+ #: additional runtime cost which should not be enabled by default.
+ #:
+ #: If this is enabled and PROPAGATE_EXCEPTIONS is not changed from the
+ #: default it's implicitly enabled.
+ #:
+ #: This attribute can also be configured from the config with the
+ #: ``TESTING`` configuration key. Defaults to ``False``.
+ testing = ConfigAttribute('TESTING')
+
+ #: If a secret key is set, cryptographic components can use this to
+ #: sign cookies and other things. Set this to a complex random value
+ #: when you want to use the secure cookie for instance.
+ #:
+ #: This attribute can also be configured from the config with the
+ #: ``SECRET_KEY`` configuration key. Defaults to ``None``.
+ secret_key = ConfigAttribute('SECRET_KEY')
+
+ #: The secure cookie uses this for the name of the session cookie.
+ #:
+ #: This attribute can also be configured from the config with the
+ #: ``SESSION_COOKIE_NAME`` configuration key. Defaults to ``'session'``
+ session_cookie_name = ConfigAttribute('SESSION_COOKIE_NAME')
+
+ #: A :class:`~datetime.timedelta` which is used to set the expiration
+ #: date of a permanent session. The default is 31 days which makes a
+ #: permanent session survive for roughly one month.
+ #:
+ #: This attribute can also be configured from the config with the
+ #: ``PERMANENT_SESSION_LIFETIME`` configuration key. Defaults to
+ #: ``timedelta(days=31)``
+ permanent_session_lifetime = ConfigAttribute('PERMANENT_SESSION_LIFETIME',
+ get_converter=_make_timedelta)
+
+ #: A :class:`~datetime.timedelta` which is used as default cache_timeout
+ #: for the :func:`send_file` functions. The default is 12 hours.
+ #:
+ #: This attribute can also be configured from the config with the
+ #: ``SEND_FILE_MAX_AGE_DEFAULT`` configuration key. This configuration
+ #: variable can also be set with an integer value used as seconds.
+ #: Defaults to ``timedelta(hours=12)``
+ send_file_max_age_default = ConfigAttribute('SEND_FILE_MAX_AGE_DEFAULT',
+ get_converter=_make_timedelta)
+
+ #: Enable this if you want to use the X-Sendfile feature. Keep in
+ #: mind that the server has to support this. This only affects files
+ #: sent with the :func:`send_file` method.
+ #:
+ #: .. versionadded:: 0.2
+ #:
+ #: This attribute can also be configured from the config with the
+ #: ``USE_X_SENDFILE`` configuration key. Defaults to ``False``.
+ use_x_sendfile = ConfigAttribute('USE_X_SENDFILE')
+
+ #: The name of the logger to use. By default the logger name is the
+ #: package name passed to the constructor.
+ #:
+ #: .. versionadded:: 0.4
+ logger_name = ConfigAttribute('LOGGER_NAME')
+
+ #: The JSON encoder class to use. Defaults to :class:`~flask.json.JSONEncoder`.
+ #:
+ #: .. versionadded:: 0.10
+ json_encoder = json.JSONEncoder
+
+ #: The JSON decoder class to use. Defaults to :class:`~flask.json.JSONDecoder`.
+ #:
+ #: .. versionadded:: 0.10
+ json_decoder = json.JSONDecoder
+
+ #: Options that are passed directly to the Jinja2 environment.
+ jinja_options = ImmutableDict(
+ extensions=['jinja2.ext.autoescape', 'jinja2.ext.with_']
+ )
+
+ #: Default configuration parameters.
+ default_config = ImmutableDict({
+ 'DEBUG': get_debug_flag(default=False),
+ 'TESTING': False,
+ 'PROPAGATE_EXCEPTIONS': None,
+ 'PRESERVE_CONTEXT_ON_EXCEPTION': None,
+ 'SECRET_KEY': None,
+ 'PERMANENT_SESSION_LIFETIME': timedelta(days=31),
+ 'USE_X_SENDFILE': False,
+ 'LOGGER_NAME': None,
+ 'LOGGER_HANDLER_POLICY': 'always',
+ 'SERVER_NAME': None,
+ 'APPLICATION_ROOT': None,
+ 'SESSION_COOKIE_NAME': 'session',
+ 'SESSION_COOKIE_DOMAIN': None,
+ 'SESSION_COOKIE_PATH': None,
+ 'SESSION_COOKIE_HTTPONLY': True,
+ 'SESSION_COOKIE_SECURE': False,
+ 'SESSION_REFRESH_EACH_REQUEST': True,
+ 'MAX_CONTENT_LENGTH': None,
+ 'SEND_FILE_MAX_AGE_DEFAULT': timedelta(hours=12),
+ 'TRAP_BAD_REQUEST_ERRORS': False,
+ 'TRAP_HTTP_EXCEPTIONS': False,
+ 'EXPLAIN_TEMPLATE_LOADING': False,
+ 'PREFERRED_URL_SCHEME': 'http',
+ 'JSON_AS_ASCII': True,
+ 'JSON_SORT_KEYS': True,
+ 'JSONIFY_PRETTYPRINT_REGULAR': True,
+ 'JSONIFY_MIMETYPE': 'application/json',
+ 'TEMPLATES_AUTO_RELOAD': None,
+ })
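+ # Any of these defaults can be overridden on the ``config`` mapping once
+ # the application object exists; an illustrative sketch (the values below
+ # are examples only, not recommended settings)::
+ #
+ #     app = Flask(__name__)
+ #     app.config.update(SECRET_KEY='dev-key', JSON_SORT_KEYS=False)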
+
+ #: The rule object to use for URL rules created. This is used by
+ #: :meth:`add_url_rule`. Defaults to :class:`werkzeug.routing.Rule`.
+ #:
+ #: .. versionadded:: 0.7
+ url_rule_class = Rule
+
+ #: the test client class that is used when `test_client` is used.
+ #:
+ #: .. versionadded:: 0.7
+ test_client_class = None
+
+ #: the session interface to use. By default an instance of
+ #: :class:`~flask.sessions.SecureCookieSessionInterface` is used here.
+ #:
+ #: .. versionadded:: 0.8
+ session_interface = SecureCookieSessionInterface()
+
+ def __init__(self, import_name, static_path=None, static_url_path=None,
+ static_folder='static', template_folder='templates',
+ instance_path=None, instance_relative_config=False,
+ root_path=None):
+ _PackageBoundObject.__init__(self, import_name,
+ template_folder=template_folder,
+ root_path=root_path)
+ if static_path is not None:
+ from warnings import warn
+ warn(DeprecationWarning('static_path is now called '
+ 'static_url_path'), stacklevel=2)
+ static_url_path = static_path
+
+ if static_url_path is not None:
+ self.static_url_path = static_url_path
+ if static_folder is not None:
+ self.static_folder = static_folder
+ if instance_path is None:
+ instance_path = self.auto_find_instance_path()
+ elif not os.path.isabs(instance_path):
+ raise ValueError('If an instance path is provided it must be '
+ 'absolute. A relative path was given instead.')
+
+ #: Holds the path to the instance folder.
+ #:
+ #: .. versionadded:: 0.8
+ self.instance_path = instance_path
+
+ #: The configuration dictionary as :class:`Config`. This behaves
+ #: exactly like a regular dictionary but supports additional methods
+ #: to load a config from files.
+ self.config = self.make_config(instance_relative_config)
+
+ # Prepare the deferred setup of the logger.
+ self._logger = None
+ self.logger_name = self.import_name
+
+ #: A dictionary of all view functions registered. The keys will
+ #: be function names which are also used to generate URLs and
+ #: the values are the function objects themselves.
+ #: To register a view function, use the :meth:`route` decorator.
+ self.view_functions = {}
+
+ # support for the now deprecated `error_handlers` attribute. The
+ # :attr:`error_handler_spec` shall be used now.
+ self._error_handlers = {}
+
+ #: A dictionary of all registered error handlers. The key is ``None``
+ #: for error handlers active on the application, otherwise the key is
+ #: the name of the blueprint. Each key points to another dictionary
+ #: where the key is the status code of the http exception. The
+ #: special key ``None`` points to a list of tuples where the first item
+ #: is the class for the instance check and the second the error handler
+ #: function.
+ #:
+ #: To register an error handler, use the :meth:`errorhandler`
+ #: decorator.
+ self.error_handler_spec = {None: self._error_handlers}
+
+ #: A list of functions that are called when :meth:`url_for` raises a
+ #: :exc:`~werkzeug.routing.BuildError`. Each function registered here
+ #: is called with `error`, `endpoint` and `values`. If a function
+ #: returns ``None`` or raises a :exc:`BuildError` the next function is
+ #: tried.
+ #:
+ #: .. versionadded:: 0.9
+ self.url_build_error_handlers = []
+
+ #: A dictionary with lists of functions that should be called at the
+ #: beginning of the request. The key of the dictionary is the name of
+ #: the blueprint this function is active for, ``None`` for all requests.
+ #: This can for example be used to open database connections or
+ #: to get hold of the currently logged in user. To register a
+ #: function here, use the :meth:`before_request` decorator.
+ self.before_request_funcs = {}
+
+ #: A lists of functions that should be called at the beginning of the
+ #: first request to this instance. To register a function here, use
+ #: the :meth:`before_first_request` decorator.
+ #:
+ #: .. versionadded:: 0.8
+ self.before_first_request_funcs = []
+
+ #: A dictionary with lists of functions that should be called after
+ #: each request. The key of the dictionary is the name of the blueprint
+ #: this function is active for, ``None`` for all requests. This can for
+ #: example be used to close database connections. To register a function
+ #: here, use the :meth:`after_request` decorator.
+ self.after_request_funcs = {}
+
+ #: A dictionary with lists of functions that are called after
+ #: each request, even if an exception has occurred. The key of the
+ #: dictionary is the name of the blueprint this function is active for,
+ #: ``None`` for all requests. These functions are not allowed to modify
+ #: the request, and their return values are ignored. If an exception
+ #: occurred while processing the request, it gets passed to each
+ #: teardown_request function. To register a function here, use the
+ #: :meth:`teardown_request` decorator.
+ #:
+ #: .. versionadded:: 0.7
+ self.teardown_request_funcs = {}
+
+ #: A list of functions that are called when the application context
+ #: is destroyed. Since the application context is also torn down
+ #: if the request ends this is the place to store code that disconnects
+ #: from databases.
+ #:
+ #: .. versionadded:: 0.9
+ self.teardown_appcontext_funcs = []
+
+ #: A dictionary with lists of functions that can be used as URL value
+ #: preprocessors. The key ``None`` here is used for application wide
+ #: callbacks, otherwise the key is the name of the blueprint.
+ #: Each of these functions has the chance to modify the dictionary
+ #: of URL values before they are used as the keyword arguments of the
+ #: view function. For each function registered here one should also
+ #: provide a :meth:`url_defaults` function that automatically adds back
+ #: the parameters that were removed that way.
+ #:
+ #: .. versionadded:: 0.7
+ self.url_value_preprocessors = {}
+
+ #: A dictionary with lists of functions that can be used as URL value
+ #: default functions. Whenever a URL is built these functions are
+ #: called to modify the dictionary of values in place. The key
+ #: ``None`` here is used for application wide callbacks, otherwise
+ #: the key is the name of the blueprint.
+ #:
+ #: .. versionadded:: 0.7
+ self.url_default_functions = {}
+
+ #: A dictionary with list of functions that are called without argument
+ #: to populate the template context. The key of the dictionary is the
+ #: name of the blueprint this function is active for, ``None`` for all
+ #: requests. Each returns a dictionary that the template context is
+ #: updated with. To register a function here, use the
+ #: :meth:`context_processor` decorator.
+ self.template_context_processors = {
+ None: [_default_template_ctx_processor]
+ }
+
+ #: A list of shell context processor functions that should be run
+ #: when a shell context is created.
+ #:
+ #: .. versionadded:: 0.11
+ self.shell_context_processors = []
+
+ #: all the attached blueprints in a dictionary by name. Blueprints
+ #: can be attached multiple times so this dictionary does not tell
+ #: you how often they got attached.
+ #:
+ #: .. versionadded:: 0.7
+ self.blueprints = {}
+ self._blueprint_order = []
+
+ #: a place where extensions can store application specific state. For
+ #: example this is where an extension could store database engines and
+ #: similar things. For backwards compatibility extensions should register
+ #: themselves like this::
+ #:
+ #: if not hasattr(app, 'extensions'):
+ #: app.extensions = {}
+ #: app.extensions['extensionname'] = SomeObject()
+ #:
+ #: The key must match the name of the extension module. For example in
+ #: case of a "Flask-Foo" extension in `flask_foo`, the key would be
+ #: ``'foo'``.
+ #:
+ #: .. versionadded:: 0.7
+ self.extensions = {}
+
+ #: The :class:`~werkzeug.routing.Map` for this instance. You can use
+ #: this to change the routing converters after the class was created
+ #: but before any routes are connected. Example::
+ #:
+ #: from werkzeug.routing import BaseConverter
+ #:
+ #: class ListConverter(BaseConverter):
+ #: def to_python(self, value):
+ #: return value.split(',')
+ #: def to_url(self, values):
+ #: return ','.join(super(ListConverter, self).to_url(value)
+ #: for value in values)
+ #:
+ #: app = Flask(__name__)
+ #: app.url_map.converters['list'] = ListConverter
+ self.url_map = Map()
+
+ # tracks internally if the application already handled at least one
+ # request.
+ self._got_first_request = False
+ self._before_request_lock = Lock()
+
+ # register the static folder for the application. Do that even
+ # if the folder does not exist. First of all it might be created
+ # while the server is running (usually happens during development)
+ # but also because google appengine stores static files somewhere
+ # else when mapped with the .yml file.
+ if self.has_static_folder:
+ self.add_url_rule(self.static_url_path + '/<path:filename>',
+ endpoint='static',
+ view_func=self.send_static_file)
+
+ #: The click command line context for this application. Commands
+ #: registered here show up in the :command:`flask` command once the
+ #: application has been discovered. The default commands are
+ #: provided by Flask itself and can be overridden.
+ #:
+ #: This is an instance of a :class:`click.Group` object.
+ self.cli = cli.AppGroup(self.name)
+
+ def _get_error_handlers(self):
+ from warnings import warn
+ warn(DeprecationWarning('error_handlers is deprecated, use the '
+ 'new error_handler_spec attribute instead.'), stacklevel=1)
+ return self._error_handlers
+ def _set_error_handlers(self, value):
+ self._error_handlers = value
+ self.error_handler_spec[None] = value
+ error_handlers = property(_get_error_handlers, _set_error_handlers)
+ del _get_error_handlers, _set_error_handlers
+
+ @locked_cached_property
+ def name(self):
+ """The name of the application. This is usually the import name
+ with the difference that it's guessed from the run file if the
+ import name is ``'__main__'``. This name is used as a display name when
+ Flask needs the name of the application. It can be set and overridden
+ to change the value.
+
+ .. versionadded:: 0.8
+ """
+ if self.import_name == '__main__':
+ fn = getattr(sys.modules['__main__'], '__file__', None)
+ if fn is None:
+ return '__main__'
+ return os.path.splitext(os.path.basename(fn))[0]
+ return self.import_name
+
+ @property
+ def propagate_exceptions(self):
+ """Returns the value of the ``PROPAGATE_EXCEPTIONS`` configuration
+ value in case it's set, otherwise a sensible default is returned.
+
+ .. versionadded:: 0.7
+ """
+ rv = self.config['PROPAGATE_EXCEPTIONS']
+ if rv is not None:
+ return rv
+ return self.testing or self.debug
+
+ @property
+ def preserve_context_on_exception(self):
+ """Returns the value of the ``PRESERVE_CONTEXT_ON_EXCEPTION``
+ configuration value in case it's set, otherwise a sensible default
+ is returned.
+
+ .. versionadded:: 0.7
+ """
+ rv = self.config['PRESERVE_CONTEXT_ON_EXCEPTION']
+ if rv is not None:
+ return rv
+ return self.debug
+
+ @property
+ def logger(self):
+ """A :class:`logging.Logger` object for this application. The
+ default configuration is to log to stderr if the application is
+ in debug mode. This logger can be used to (surprise) log messages.
+ Here some examples::
+
+ app.logger.debug('A value for debugging')
+ app.logger.warning('A warning occurred (%d apples)', 42)
+ app.logger.error('An error occurred')
+
+ .. versionadded:: 0.3
+ """
+ if self._logger and self._logger.name == self.logger_name:
+ return self._logger
+ with _logger_lock:
+ if self._logger and self._logger.name == self.logger_name:
+ return self._logger
+ from flask.logging import create_logger
+ self._logger = rv = create_logger(self)
+ return rv
+
+ @locked_cached_property
+ def jinja_env(self):
+ """The Jinja2 environment used to load templates."""
+ return self.create_jinja_environment()
+
+ @property
+ def got_first_request(self):
+ """This attribute is set to ``True`` if the application started
+ handling the first request.
+
+ .. versionadded:: 0.8
+ """
+ return self._got_first_request
+
+ def make_config(self, instance_relative=False):
+ """Used to create the config attribute by the Flask constructor.
+ The `instance_relative` parameter is passed in from the constructor
+ of Flask (there named `instance_relative_config`) and indicates if
+ the config should be relative to the instance path or the root path
+ of the application.
+
+ .. versionadded:: 0.8
+ """
+ root_path = self.root_path
+ if instance_relative:
+ root_path = self.instance_path
+ return self.config_class(root_path, self.default_config)
+
+ def auto_find_instance_path(self):
+ """Tries to locate the instance path if it was not provided to the
+ constructor of the application class. It will basically calculate
+ the path to a folder named ``instance`` next to your main file or
+ the package.
+
+ .. versionadded:: 0.8
+ """
+ prefix, package_path = find_package(self.import_name)
+ if prefix is None:
+ return os.path.join(package_path, 'instance')
+ return os.path.join(prefix, 'var', self.name + '-instance')
+
+ def open_instance_resource(self, resource, mode='rb'):
+ """Opens a resource from the application's instance folder
+ (:attr:`instance_path`). Otherwise works like
+ :meth:`open_resource`. Instance resources can also be opened for
+ writing.
+
+ :param resource: the name of the resource. To access resources within
+ subfolders use forward slashes as separator.
+ :param mode: resource file opening mode, default is 'rb'.
+ """
+ return open(os.path.join(self.instance_path, resource), mode)
+
+ def create_jinja_environment(self):
+ """Creates the Jinja2 environment based on :attr:`jinja_options`
+ and :meth:`select_jinja_autoescape`. Since 0.7 this also adds
+ the Jinja2 globals and filters after initialization. Override
+ this function to customize the behavior.
+
+ .. versionadded:: 0.5
+ .. versionchanged:: 0.11
+ ``Environment.auto_reload`` set in accordance with
+ ``TEMPLATES_AUTO_RELOAD`` configuration option.
+ """
+ options = dict(self.jinja_options)
+ if 'autoescape' not in options:
+ options['autoescape'] = self.select_jinja_autoescape
+ if 'auto_reload' not in options:
+ if self.config['TEMPLATES_AUTO_RELOAD'] is not None:
+ options['auto_reload'] = self.config['TEMPLATES_AUTO_RELOAD']
+ else:
+ options['auto_reload'] = self.debug
+ rv = self.jinja_environment(self, **options)
+ rv.globals.update(
+ url_for=url_for,
+ get_flashed_messages=get_flashed_messages,
+ config=self.config,
+ # request, session and g are normally added with the
+ # context processor for efficiency reasons but for imported
+ # templates we also want the proxies in there.
+ request=request,
+ session=session,
+ g=g
+ )
+ rv.filters['tojson'] = json.tojson_filter
+ return rv
+
+ def create_global_jinja_loader(self):
+ """Creates the loader for the Jinja2 environment. Can be used to
+ override just the loader and keep the rest unchanged. It's
+ discouraged to override this function; instead one should override
+ the :meth:`jinja_loader` function.
+
+ The global loader dispatches between the loaders of the application
+ and the individual blueprints.
+
+ .. versionadded:: 0.7
+ """
+ return DispatchingJinjaLoader(self)
+
+ def init_jinja_globals(self):
+ """Deprecated. Used to initialize the Jinja2 globals.
+
+ .. versionadded:: 0.5
+ .. versionchanged:: 0.7
+ This method is deprecated with 0.7. Override
+ :meth:`create_jinja_environment` instead.
+ """
+
+ def select_jinja_autoescape(self, filename):
+ """Returns ``True`` if autoescaping should be active for the given
+ template name. If no template name is given, returns `True`.
+
+ .. versionadded:: 0.5
+ """
+ if filename is None:
+ return True
+ return filename.endswith(('.html', '.htm', '.xml', '.xhtml'))
+
+ def update_template_context(self, context):
+ """Update the template context with some commonly used variables.
+ This injects request, session, config and g into the template
+ context as well as everything template context processors want
+ to inject. Note that as of Flask 0.6, the original values
+ in the context will not be overridden if a context processor
+ decides to return a value with the same key.
+
+ :param context: the context as a dictionary that is updated in place
+ to add extra variables.
+ """
+ funcs = self.template_context_processors[None]
+ reqctx = _request_ctx_stack.top
+ if reqctx is not None:
+ bp = reqctx.request.blueprint
+ if bp is not None and bp in self.template_context_processors:
+ funcs = chain(funcs, self.template_context_processors[bp])
+ orig_ctx = context.copy()
+ for func in funcs:
+ context.update(func())
+ # make sure the original values win. This makes it possible to
+ # add new variables in context processors more easily without breaking
+ # existing views.
+ context.update(orig_ctx)
+
+ def make_shell_context(self):
+ """Returns the shell context for an interactive shell for this
+ application. This runs all the registered shell context
+ processors.
+
+ .. versionadded:: 0.11
+ """
+ rv = {'app': self, 'g': g}
+ for processor in self.shell_context_processors:
+ rv.update(processor())
+ return rv
+
+ def run(self, host=None, port=None, debug=None, **options):
+ """Runs the application on a local development server.
+
+ Do not use ``run()`` in a production setting. It is not intended to
+ meet security and performance requirements for a production server.
+ Instead, see :ref:`deployment` for WSGI server recommendations.
+
+ If the :attr:`debug` flag is set the server will automatically reload
+ for code changes and show a debugger in case an exception happened.
+
+ If you want to run the application in debug mode, but disable the
+ code execution on the interactive debugger, you can pass
+ ``use_evalex=False`` as parameter. This will keep the debugger's
+ traceback screen active, but disable code execution.
+
+ It is not recommended to use this function for development with
+ automatic reloading as this is badly supported. Instead you should
+ be using the :command:`flask` command line script's ``run`` support.
+
+ .. admonition:: Keep in Mind
+
+ Flask will suppress any server error with a generic error page
+ unless it is in debug mode. As such to enable just the
+ interactive debugger without the code reloading, you have to
+ invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.
+ Setting ``use_debugger`` to ``True`` without being in debug mode
+ won't catch any exceptions because there won't be any to
+ catch.
+
+ .. versionchanged:: 0.10
+ The default port is now picked from the ``SERVER_NAME`` variable.
+
+ :param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to
+ have the server available externally as well. Defaults to
+ ``'127.0.0.1'``.
+ :param port: the port of the webserver. Defaults to ``5000`` or the
+ port defined in the ``SERVER_NAME`` config variable if
+ present.
+ :param debug: if given, enable or disable debug mode.
+ See :attr:`debug`.
+ :param options: the options to be forwarded to the underlying
+ Werkzeug server. See
+ :func:`werkzeug.serving.run_simple` for more
+ information.
+ """
+ from werkzeug.serving import run_simple
+ if host is None:
+ host = '127.0.0.1'
+ if port is None:
+ server_name = self.config['SERVER_NAME']
+ if server_name and ':' in server_name:
+ port = int(server_name.rsplit(':', 1)[1])
+ else:
+ port = 5000
+ if debug is not None:
+ self.debug = bool(debug)
+ options.setdefault('use_reloader', self.debug)
+ options.setdefault('use_debugger', self.debug)
+ try:
+ run_simple(host, port, self, **options)
+ finally:
+ # reset the first request information if the development server
+ # stopped normally. This makes it possible to restart the server
+ # without the reloader and related tooling from an interactive shell.
+ self._got_first_request = False
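+ # Illustrative usage of this method for local development; the host, port
+ # and debug values below are example values, not Flask defaults::
+ #
+ #     if __name__ == '__main__':
+ #         app.run(host='0.0.0.0', port=8080, debug=True)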
+
+ def test_client(self, use_cookies=True, **kwargs):
+ """Creates a test client for this application. For information
+ about unit testing head over to :ref:`testing`.
+
+ Note that if you are testing for assertions or exceptions in your
+ application code, you must set ``app.testing = True`` in order for the
+ exceptions to propagate to the test client. Otherwise, the exception
+ will be handled by the application (not visible to the test client) and
+ the only indication of an AssertionError or other exception will be a
+ 500 status code response to the test client. See the :attr:`testing`
+ attribute. For example::
+
+ app.testing = True
+ client = app.test_client()
+
+ The test client can be used in a ``with`` block to defer the closing down
+ of the context until the end of the ``with`` block. This is useful if
+ you want to access the context locals for testing::
+
+ with app.test_client() as c:
+ rv = c.get('/?vodka=42')
+ assert request.args['vodka'] == '42'
+
+ Additionally, you may pass optional keyword arguments that will then
+ be passed to the application's :attr:`test_client_class` constructor.
+ For example::
+
+ from flask.testing import FlaskClient
+
+ class CustomClient(FlaskClient):
+ def __init__(self, *args, **kwargs):
+ self._authentication = kwargs.pop("authentication")
+ super(CustomClient, self).__init__(*args, **kwargs)
+
+ app.test_client_class = CustomClient
+ client = app.test_client(authentication='Basic ....')
+
+ See :class:`~flask.testing.FlaskClient` for more information.
+
+ .. versionchanged:: 0.4
+ added support for ``with`` block usage for the client.
+
+ .. versionadded:: 0.7
+ The `use_cookies` parameter was added as well as the ability
+ to override the client to be used by setting the
+ :attr:`test_client_class` attribute.
+
+ .. versionchanged:: 0.11
+ Added `**kwargs` to support passing additional keyword arguments to
+ the constructor of :attr:`test_client_class`.
+ """
+ cls = self.test_client_class
+ if cls is None:
+ from flask.testing import FlaskClient as cls
+ return cls(self, self.response_class, use_cookies=use_cookies, **kwargs)
+
+ def open_session(self, request):
+ """Creates or opens a new session. Default implementation stores all
+ session data in a signed cookie. This requires that the
+ :attr:`secret_key` is set. Instead of overriding this method
+ we recommend replacing the :class:`session_interface`.
+
+ :param request: an instance of :attr:`request_class`.
+ """
+ return self.session_interface.open_session(self, request)
+
+ def save_session(self, session, response):
+ """Saves the session if it needs updates. For the default
+ implementation, check :meth:`open_session`. Instead of overriding this
+ method we recommend replacing the :class:`session_interface`.
+
+ :param session: the session to be saved (a
+ :class:`~werkzeug.contrib.securecookie.SecureCookie`
+ object)
+ :param response: an instance of :attr:`response_class`
+ """
+ return self.session_interface.save_session(self, session, response)
+
+ def make_null_session(self):
+ """Creates a new instance of a missing session. Instead of overriding
+ this method we recommend replacing the :class:`session_interface`.
+
+ .. versionadded:: 0.7
+ """
+ return self.session_interface.make_null_session(self)
+
+ @setupmethod
+ def register_blueprint(self, blueprint, **options):
+ """Registers a blueprint on the application.
+
+ .. versionadded:: 0.7
+ """
+ first_registration = False
+ if blueprint.name in self.blueprints:
+ assert self.blueprints[blueprint.name] is blueprint, \
+ 'A blueprint\'s name collision occurred between %r and ' \
+ '%r. Both share the same name "%s". Blueprints that ' \
+ 'are created on the fly need unique names.' % \
+ (blueprint, self.blueprints[blueprint.name], blueprint.name)
+ else:
+ self.blueprints[blueprint.name] = blueprint
+ self._blueprint_order.append(blueprint)
+ first_registration = True
+ blueprint.register(self, options, first_registration)
+
+ def iter_blueprints(self):
+ """Iterates over all blueprints by the order they were registered.
+
+ .. versionadded:: 0.11
+ """
+ return iter(self._blueprint_order)
+
+ @setupmethod
+ def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
+ """Connects a URL rule. Works exactly like the :meth:`route`
+ decorator. If a view_func is provided it will be registered with the
+ endpoint.
+
+ Basically this example::
+
+ @app.route('/')
+ def index():
+ pass
+
+ Is equivalent to the following::
+
+ def index():
+ pass
+ app.add_url_rule('/', 'index', index)
+
+ If the view_func is not provided you will need to connect the endpoint
+ to a view function like so::
+
+ app.view_functions['index'] = index
+
+ Internally :meth:`route` invokes :meth:`add_url_rule` so if you want
+ to customize the behavior via subclassing you only need to change
+ this method.
+
+ For more information refer to :ref:`url-route-registrations`.
+
+ .. versionchanged:: 0.2
+ `view_func` parameter added.
+
+ .. versionchanged:: 0.6
+ ``OPTIONS`` is added automatically as method.
+
+ :param rule: the URL rule as string
+ :param endpoint: the endpoint for the registered URL rule. Flask
+ itself assumes the name of the view function as
+ endpoint
+ :param view_func: the function to call when serving a request to the
+ provided endpoint
+ :param options: the options to be forwarded to the underlying
+ :class:`~werkzeug.routing.Rule` object. A change
+ compared to Werkzeug is the handling of the ``methods`` option:
+ ``methods`` is a list of methods this rule should be limited
+ to (``GET``, ``POST`` etc.). By default a rule
+ just listens for ``GET`` (and implicitly ``HEAD``).
+ Starting with Flask 0.6, ``OPTIONS`` is implicitly
+ added and handled by the standard request handling.
+ """
+ if endpoint is None:
+ endpoint = _endpoint_from_view_func(view_func)
+ options['endpoint'] = endpoint
+ methods = options.pop('methods', None)
+
+ # if the methods are not given and the view_func object knows its
+ # methods we can use that instead. If neither exists, we go with
+ # a tuple of only ``GET`` as default.
+ if methods is None:
+ methods = getattr(view_func, 'methods', None) or ('GET',)
+ if isinstance(methods, string_types):
+ raise TypeError('Allowed methods have to be iterables of strings, '
+ 'for example: @app.route(..., methods=["POST"])')
+ methods = set(item.upper() for item in methods)
+
+ # Methods that should always be added
+ required_methods = set(getattr(view_func, 'required_methods', ()))
+
+ # starting with Flask 0.8 the view_func object can disable and
+ # force-enable the automatic options handling.
+ provide_automatic_options = getattr(view_func,
+ 'provide_automatic_options', None)
+
+ if provide_automatic_options is None:
+ if 'OPTIONS' not in methods:
+ provide_automatic_options = True
+ required_methods.add('OPTIONS')
+ else:
+ provide_automatic_options = False
+
+ # Add the required methods now.
+ methods |= required_methods
+
+ rule = self.url_rule_class(rule, methods=methods, **options)
+ rule.provide_automatic_options = provide_automatic_options
+
+ self.url_map.add(rule)
+ if view_func is not None:
+ old_func = self.view_functions.get(endpoint)
+ if old_func is not None and old_func != view_func:
+ raise AssertionError('View function mapping is overwriting an '
+ 'existing endpoint function: %s' % endpoint)
+ self.view_functions[endpoint] = view_func
+
+ def route(self, rule, **options):
+ """A decorator that is used to register a view function for a
+ given URL rule. This does the same thing as :meth:`add_url_rule`
+ but is intended for decorator usage::
+
+ @app.route('/')
+ def index():
+ return 'Hello World'
+
+ For more information refer to :ref:`url-route-registrations`.
+
+ :param rule: the URL rule as string
+ :param endpoint: the endpoint for the registered URL rule. Flask
+ itself assumes the name of the view function as
+ endpoint
+ :param options: the options to be forwarded to the underlying
+ :class:`~werkzeug.routing.Rule` object. A change
+ compared to Werkzeug is the handling of the ``methods`` option:
+ ``methods`` is a list of methods this rule should be limited
+ to (``GET``, ``POST`` etc.). By default a rule
+ just listens for ``GET`` (and implicitly ``HEAD``).
+ Starting with Flask 0.6, ``OPTIONS`` is implicitly
+ added and handled by the standard request handling.
+ """
+ def decorator(f):
+ endpoint = options.pop('endpoint', None)
+ self.add_url_rule(rule, endpoint, f, **options)
+ return f
+ return decorator
+
+ @setupmethod
+ def endpoint(self, endpoint):
+ """A decorator to register a function as an endpoint.
+ Example::
+
+ @app.endpoint('example.endpoint')
+ def example():
+ return "example"
+
+ :param endpoint: the name of the endpoint
+ """
+ def decorator(f):
+ self.view_functions[endpoint] = f
+ return f
+ return decorator
+
+ @staticmethod
+ def _get_exc_class_and_code(exc_class_or_code):
+ """Ensure that we register only exceptions as handler keys"""
+ if isinstance(exc_class_or_code, integer_types):
+ exc_class = default_exceptions[exc_class_or_code]
+ else:
+ exc_class = exc_class_or_code
+
+ assert issubclass(exc_class, Exception)
+
+ if issubclass(exc_class, HTTPException):
+ return exc_class, exc_class.code
+ else:
+ return exc_class, None
+
+ @setupmethod
+ def errorhandler(self, code_or_exception):
+ """A decorator that is used to register a function given an
+ error code. Example::
+
+ @app.errorhandler(404)
+ def page_not_found(error):
+ return 'This page does not exist', 404
+
+ You can also register handlers for arbitrary exceptions::
+
+ @app.errorhandler(DatabaseError)
+ def special_exception_handler(error):
+ return 'Database connection failed', 500
+
+ You can also register a function as error handler without using
+ the :meth:`errorhandler` decorator. The following example is
+ equivalent to the one above::
+
+ def page_not_found(error):
+ return 'This page does not exist', 404
+ app.error_handler_spec[None][404] = page_not_found
+
+ Setting error handlers via assignments to :attr:`error_handler_spec`
+ however is discouraged as it requires fiddling with nested dictionaries
+ and the special case for arbitrary exception types.
+
+ The first ``None`` refers to the active blueprint. If the error
+ handler should be application wide ``None`` shall be used.
+
+ .. versionadded:: 0.7
+ Use :meth:`register_error_handler` instead of modifying
+ :attr:`error_handler_spec` directly, for application wide error
+ handlers.
+
+ .. versionadded:: 0.7
+ One can now additionally also register custom exception types
+ that do not necessarily have to be a subclass of the
+ :class:`~werkzeug.exceptions.HTTPException` class.
+
+ :param code_or_exception: the code as integer for the handler, or
+ an arbitrary exception
+ """
+ def decorator(f):
+ self._register_error_handler(None, code_or_exception, f)
+ return f
+ return decorator
+
+ def register_error_handler(self, code_or_exception, f):
+ """Alternative error attach function to the :meth:`errorhandler`
+ decorator that is more straightforward to use for non decorator
+ usage.
+
+ .. versionadded:: 0.7
+ """
+ self._register_error_handler(None, code_or_exception, f)
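+ # An illustrative non-decorator registration sketch using this method;
+ # ``MyError`` and ``handle_my_error`` are hypothetical names::
+ #
+ #     def handle_my_error(error):
+ #         return 'something went wrong', 500
+ #
+ #     app.register_error_handler(MyError, handle_my_error)
+ #     app.register_error_handler(404, handle_my_error)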
+
+ @setupmethod
+ def _register_error_handler(self, key, code_or_exception, f):
+ """
+ :type key: None|str
+ :type code_or_exception: int|T<=Exception
+ :type f: callable
+ """
+ if isinstance(code_or_exception, HTTPException): # old broken behavior
+ raise ValueError(
+ 'Tried to register a handler for an exception instance {0!r}. '
+ 'Handlers can only be registered for exception classes or HTTP error codes.'
+ .format(code_or_exception))
+
+ exc_class, code = self._get_exc_class_and_code(code_or_exception)
+
+ handlers = self.error_handler_spec.setdefault(key, {}).setdefault(code, {})
+ handlers[exc_class] = f
+
+ @setupmethod
+ def template_filter(self, name=None):
+ """A decorator that is used to register custom template filter.
+ You can specify a name for the filter, otherwise the function
+ name will be used. Example::
+
+ @app.template_filter()
+ def reverse(s):
+ return s[::-1]
+
+ :param name: the optional name of the filter, otherwise the
+ function name will be used.
+ """
+ def decorator(f):
+ self.add_template_filter(f, name=name)
+ return f
+ return decorator
+
+ @setupmethod
+ def add_template_filter(self, f, name=None):
+ """Register a custom template filter. Works exactly like the
+ :meth:`template_filter` decorator.
+
+ :param name: the optional name of the filter, otherwise the
+ function name will be used.
+ """
+ self.jinja_env.filters[name or f.__name__] = f
+
+ @setupmethod
+ def template_test(self, name=None):
+ """A decorator that is used to register custom template test.
+ You can specify a name for the test, otherwise the function
+ name will be used. Example::
+
+ @app.template_test()
+ def is_prime(n):
+ if n == 2:
+ return True
+ for i in range(2, int(math.ceil(math.sqrt(n))) + 1):
+ if n % i == 0:
+ return False
+ return True
+
+ .. versionadded:: 0.10
+
+ :param name: the optional name of the test, otherwise the
+ function name will be used.
+ """
+ def decorator(f):
+ self.add_template_test(f, name=name)
+ return f
+ return decorator
+
+ @setupmethod
+ def add_template_test(self, f, name=None):
+ """Register a custom template test. Works exactly like the
+ :meth:`template_test` decorator.
+
+ .. versionadded:: 0.10
+
+ :param name: the optional name of the test, otherwise the
+ function name will be used.
+ """
+ self.jinja_env.tests[name or f.__name__] = f
+
+ @setupmethod
+ def template_global(self, name=None):
+ """A decorator that is used to register a custom template global function.
+ You can specify a name for the global function, otherwise the function
+ name will be used. Example::
+
+ @app.template_global()
+ def double(n):
+ return 2 * n
+
+ .. versionadded:: 0.10
+
+ :param name: the optional name of the global function, otherwise the
+ function name will be used.
+ """
+ def decorator(f):
+ self.add_template_global(f, name=name)
+ return f
+ return decorator
+
+ @setupmethod
+ def add_template_global(self, f, name=None):
+ """Register a custom template global function. Works exactly like the
+ :meth:`template_global` decorator.
+
+ .. versionadded:: 0.10
+
+ :param name: the optional name of the global function, otherwise the
+ function name will be used.
+ """
+ self.jinja_env.globals[name or f.__name__] = f
+
+ @setupmethod
+ def before_request(self, f):
+ """Registers a function to run before each request.
+
+ The function will be called without any arguments.
+ If the function returns a non-None value, it's handled as
+ if it was the return value from the view and further
+ request handling is stopped.
+ """
+ self.before_request_funcs.setdefault(None, []).append(f)
+ return f
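+ # An illustrative sketch of the short-circuit behavior described above;
+ # ``maintenance_mode`` is a hypothetical flag::
+ #
+ #     @app.before_request
+ #     def check_maintenance():
+ #         if maintenance_mode:
+ #             return 'Down for maintenance', 503
+ #         # returning None continues normal request handling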
+
+ @setupmethod
+ def before_first_request(self, f):
+ """Registers a function to be run before the first request to this
+ instance of the application.
+
+ The function will be called without any arguments and its return
+ value is ignored.
+
+ .. versionadded:: 0.8
+ """
+ self.before_first_request_funcs.append(f)
+ return f
+
+ @setupmethod
+ def after_request(self, f):
+ """Register a function to be run after each request.
+
+ Your function must take one parameter, an instance of
+ :attr:`response_class` and return a new response object or the
+ same (see :meth:`process_response`).
+
+ As of Flask 0.7 this function might not be executed at the end of the
+ request in case an unhandled exception occurred.
+ """
+ self.after_request_funcs.setdefault(None, []).append(f)
+ return f
+
+ @setupmethod
+ def teardown_request(self, f):
+ """Register a function to be run at the end of each request,
+ regardless of whether there was an exception or not. These functions
+ are executed when the request context is popped, even if no
+ actual request was performed.
+
+ Example::
+
+ ctx = app.test_request_context()
+ ctx.push()
+ ...
+ ctx.pop()
+
+ When ``ctx.pop()`` is executed in the above example, the teardown
+ functions are called just before the request context is removed from the
+ stack of active contexts. This becomes relevant if you are using
+ such constructs in tests.
+
+ Generally teardown functions must take every necessary step to avoid
+ failing. If they do execute code that might fail they
+ will have to surround the execution of that code with try/except
+ statements and log any errors that occur.
+
+ When a teardown function was called because of an exception it will
+ be passed an error object.
+
+ The return values of teardown functions are ignored.
+
+ .. admonition:: Debug Note
+
+ In debug mode Flask will not tear down a request on an exception
+ immediately. Instead it will keep it alive so that the interactive
+ debugger can still access it. This behavior can be controlled
+ by the ``PRESERVE_CONTEXT_ON_EXCEPTION`` configuration variable.
+ """
+ self.teardown_request_funcs.setdefault(None, []).append(f)
+ return f
+
+ @setupmethod
+ def teardown_appcontext(self, f):
+ """Registers a function to be called when the application context
+ ends. These functions are typically also called when the request
+ context is popped.
+
+ Example::
+
+ ctx = app.app_context()
+ ctx.push()
+ ...
+ ctx.pop()
+
+ When ``ctx.pop()`` is executed in the above example, the teardown
+ functions are called just before the app context is removed from the
+ stack of active contexts. This becomes relevant if you are using
+ such constructs in tests.
+
+ Since a request context typically also manages an application
+ context it would also be called when you pop a request context.
+
+ When a teardown function was called because of an exception it will
+ be passed an error object.
+
+ The return values of teardown functions are ignored.
+
+ .. versionadded:: 0.9
+ """
+ self.teardown_appcontext_funcs.append(f)
+ return f
+
+ @setupmethod
+ def context_processor(self, f):
+ """Registers a template context processor function."""
+ self.template_context_processors[None].append(f)
+ return f
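+ # An illustrative sketch: the registered function returns a dict that is
+ # merged into every template context (``SITE_NAME`` is a hypothetical
+ # config key)::
+ #
+ #     @app.context_processor
+ #     def inject_site_name():
+ #         return {'site_name': app.config.get('SITE_NAME', 'example')}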
+
+ @setupmethod
+ def shell_context_processor(self, f):
+ """Registers a shell context processor function.
+
+ .. versionadded:: 0.11
+ """
+ self.shell_context_processors.append(f)
+ return f
+
+ @setupmethod
+ def url_value_preprocessor(self, f):
+ """Registers a function as URL value preprocessor for all view
+ functions of the application. It's called before the view functions
+ are called and can modify the url values provided.
+ """
+ self.url_value_preprocessors.setdefault(None, []).append(f)
+ return f
+
+ @setupmethod
+ def url_defaults(self, f):
+ """Callback function for URL defaults for all view functions of the
+ application. It's called with the endpoint and values and should
+ update the values passed in place.
+ """
+ self.url_default_functions.setdefault(None, []).append(f)
+ return f
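+ # An illustrative sketch of pairing a URL value preprocessor with a URL
+ # defaults callback; the ``lang_code`` handling is an example, not part of
+ # this module::
+ #
+ #     @app.url_value_preprocessor
+ #     def pull_lang_code(endpoint, values):
+ #         g.lang_code = values.pop('lang_code', None)
+ #
+ #     @app.url_defaults
+ #     def add_lang_code(endpoint, values):
+ #         values.setdefault('lang_code', g.lang_code)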
+
+ def _find_error_handler(self, e):
+ """Finds a registered error handler for the request’s blueprint.
+ Otherwise falls back to the app, returns None if not a suitable
+ handler is found.
+ """
+ exc_class, code = self._get_exc_class_and_code(type(e))
+
+ def find_handler(handler_map):
+ if not handler_map:
+ return
+ for cls in exc_class.__mro__:
+ handler = handler_map.get(cls)
+ if handler is not None:
+ # cache for next time exc_class is raised
+ handler_map[exc_class] = handler
+ return handler
+
+ # try blueprint handlers
+ handler = find_handler(self.error_handler_spec
+ .get(request.blueprint, {})
+ .get(code))
+ if handler is not None:
+ return handler
+
+ # fall back to app handlers
+ return find_handler(self.error_handler_spec[None].get(code))
+
+ def handle_http_exception(self, e):
+ """Handles an HTTP exception. By default this will invoke the
+ registered error handlers and fall back to returning the
+ exception as response.
+
+ .. versionadded:: 0.3
+ """
+ # Proxy exceptions don't have error codes. We want to always return
+ # those unchanged as errors
+ if e.code is None:
+ return e
+
+ handler = self._find_error_handler(e)
+ if handler is None:
+ return e
+ return handler(e)
+
+ def trap_http_exception(self, e):
+ """Checks if an HTTP exception should be trapped or not. By default
+ this will return ``False`` for all exceptions except for a bad request
+ key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It
+ also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.
+
+ This is called for all HTTP exceptions raised by a view function.
+ If it returns ``True`` for any exception the error handler for this
+ exception is not called and it shows up as regular exception in the
+ traceback. This is helpful for debugging implicitly raised HTTP
+ exceptions.
+
+ .. versionadded:: 0.8
+ """
+ if self.config['TRAP_HTTP_EXCEPTIONS']:
+ return True
+ if self.config['TRAP_BAD_REQUEST_ERRORS']:
+ return isinstance(e, BadRequest)
+ return False
+
+ def handle_user_exception(self, e):
+ """This method is called whenever an exception occurs that should be
+ handled. A special case are
+ :class:`~werkzeug.exception.HTTPException`\s which are forwarded by
+ this function to the :meth:`handle_http_exception` method. This
+ function will either return a response value or reraise the
+ exception with the same traceback.
+
+ .. versionadded:: 0.7
+ """
+ exc_type, exc_value, tb = sys.exc_info()
+ assert exc_value is e
+
+ # ensure not to trash sys.exc_info() at that point in case someone
+ # wants the traceback preserved in handle_http_exception. Of course
+ # we cannot prevent users from trashing it themselves in a custom
+ # trap_http_exception method so that's their fault then.
+
+ if isinstance(e, HTTPException) and not self.trap_http_exception(e):
+ return self.handle_http_exception(e)
+
+ handler = self._find_error_handler(e)
+
+ if handler is None:
+ reraise(exc_type, exc_value, tb)
+ return handler(e)
+
+ def handle_exception(self, e):
+ """Default exception handling that kicks in when an exception
+ occurs that is not caught. In debug mode the exception will
+ be re-raised immediately, otherwise it is logged and the handler
+ for a 500 internal server error is used. If no such handler
+ exists, a default 500 internal server error message is displayed.
+
+ .. versionadded:: 0.3
+ """
+ exc_type, exc_value, tb = sys.exc_info()
+
+ got_request_exception.send(self, exception=e)
+ handler = self._find_error_handler(InternalServerError())
+
+ if self.propagate_exceptions:
+ # if we want to repropagate the exception, we can attempt to
+ # raise it with the whole traceback in case we can do that
+ # (the function was actually called from the except part)
+ # otherwise, we just raise the error again
+ if exc_value is e:
+ reraise(exc_type, exc_value, tb)
+ else:
+ raise e
+
+ self.log_exception((exc_type, exc_value, tb))
+ if handler is None:
+ return InternalServerError()
+ return self.finalize_request(handler(e), from_error_handler=True)
+
+ def log_exception(self, exc_info):
+ """Logs an exception. This is called by :meth:`handle_exception`
+ if debugging is disabled and right before the handler is called.
+ The default implementation logs the exception as error on the
+ :attr:`logger`.
+
+ .. versionadded:: 0.8
+ """
+ self.logger.error('Exception on %s [%s]' % (
+ request.path,
+ request.method
+ ), exc_info=exc_info)
+
+ def raise_routing_exception(self, request):
+ """Exceptions that are recording during routing are reraised with
+ this method. During debug we are not reraising redirect requests
+ for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising
+ a different error instead to help debug situations.
+
+ :internal:
+ """
+ if not self.debug \
+ or not isinstance(request.routing_exception, RequestRedirect) \
+ or request.method in ('GET', 'HEAD', 'OPTIONS'):
+ raise request.routing_exception
+
+ from .debughelpers import FormDataRoutingRedirect
+ raise FormDataRoutingRedirect(request)
+
+ def dispatch_request(self):
+ """Does the request dispatching. Matches the URL and returns the
+ return value of the view or error handler. This does not have to
+ be a response object. In order to convert the return value to a
+ proper response object, call :func:`make_response`.
+
+ .. versionchanged:: 0.7
+ This no longer does the exception handling, this code was
+ moved to the new :meth:`full_dispatch_request`.
+ """
+ req = _request_ctx_stack.top.request
+ if req.routing_exception is not None:
+ self.raise_routing_exception(req)
+ rule = req.url_rule
+ # if we provide automatic options for this URL and the
+ # request came with the OPTIONS method, reply automatically
+ if getattr(rule, 'provide_automatic_options', False) \
+ and req.method == 'OPTIONS':
+ return self.make_default_options_response()
+ # otherwise dispatch to the handler for that endpoint
+ return self.view_functions[rule.endpoint](**req.view_args)
+
+ def full_dispatch_request(self):
+ """Dispatches the request and on top of that performs request
+ pre and postprocessing as well as HTTP exception catching and
+ error handling.
+
+ .. versionadded:: 0.7
+ """
+ self.try_trigger_before_first_request_functions()
+ try:
+ request_started.send(self)
+ rv = self.preprocess_request()
+ if rv is None:
+ rv = self.dispatch_request()
+ except Exception as e:
+ rv = self.handle_user_exception(e)
+ return self.finalize_request(rv)
+
+ def finalize_request(self, rv, from_error_handler=False):
+ """Given the return value from a view function this finalizes
+ the request by converting it into a response and invoking the
+ postprocessing functions. This is invoked for both normal
+ request dispatching as well as error handlers.
+
+ Because this means that it might be called as a result of a
+ failure a special safe mode is available which can be enabled
+ with the `from_error_handler` flag. If enabled, failures in
+ response processing will be logged and otherwise ignored.
+
+ :internal:
+ """
+ response = self.make_response(rv)
+ try:
+ response = self.process_response(response)
+ request_finished.send(self, response=response)
+ except Exception:
+ if not from_error_handler:
+ raise
+ self.logger.exception('Request finalizing failed with an '
+ 'error while handling an error')
+ return response
+
+ def try_trigger_before_first_request_functions(self):
+ """Called before each request and will ensure that it triggers
+ the :attr:`before_first_request_funcs` and only exactly once per
+ application instance (which means process usually).
+
+ :internal:
+ """
+ if self._got_first_request:
+ return
+ with self._before_request_lock:
+ if self._got_first_request:
+ return
+ for func in self.before_first_request_funcs:
+ func()
+ self._got_first_request = True
+
+ def make_default_options_response(self):
+ """This method is called to create the default ``OPTIONS`` response.
+ This can be changed through subclassing to change the default
+ behavior of ``OPTIONS`` responses.
+
+ .. versionadded:: 0.7
+ """
+ adapter = _request_ctx_stack.top.url_adapter
+ if hasattr(adapter, 'allowed_methods'):
+ methods = adapter.allowed_methods()
+ else:
+ # fallback for Werkzeug < 0.7
+ methods = []
+ try:
+ adapter.match(method='--')
+ except MethodNotAllowed as e:
+ methods = e.valid_methods
+ except HTTPException as e:
+ pass
+ rv = self.response_class()
+ rv.allow.update(methods)
+ return rv
+
+ def should_ignore_error(self, error):
+ """This is called to figure out if an error should be ignored
+ or not as far as the teardown system is concerned. If this
+ function returns ``True`` then the teardown handlers will not be
+ passed the error.
+
+ .. versionadded:: 0.10
+ """
+ return False
+
+ def make_response(self, rv):
+ """Converts the return value from a view function to a real
+ response object that is an instance of :attr:`response_class`.
+
+ The following types are allowed for `rv`:
+
+ .. tabularcolumns:: |p{3.5cm}|p{9.5cm}|
+
+ ======================= ===========================================
+ :attr:`response_class` the object is returned unchanged
+ :class:`str` a response object is created with the
+ string as body
+ :class:`unicode` a response object is created with the
+ string encoded to utf-8 as body
+ a WSGI function the function is called as WSGI application
+ and buffered as response object
+ :class:`tuple` A tuple in the form ``(response, status,
+ headers)`` or ``(response, headers)``
+ where `response` is any of the
+ types defined here, `status` is a string
+ or an integer and `headers` is a list or
+ a dictionary with header values.
+ ======================= ===========================================
+
+ :param rv: the return value from the view function
+
+ .. versionchanged:: 0.9
+ Previously a tuple was interpreted as the arguments for the
+ response object.
+ """
+ status_or_headers = headers = None
+ if isinstance(rv, tuple):
+ rv, status_or_headers, headers = rv + (None,) * (3 - len(rv))
+
+ if rv is None:
+ raise ValueError('View function did not return a response')
+
+ if isinstance(status_or_headers, (dict, list)):
+ headers, status_or_headers = status_or_headers, None
+
+ if not isinstance(rv, self.response_class):
+ # When we create a response object directly, we let the constructor
+ # set the headers and status. We do this because there can be
+ # some extra logic involved when creating these objects with
+ # specific values (like default content type selection).
+ if isinstance(rv, (text_type, bytes, bytearray)):
+ rv = self.response_class(rv, headers=headers,
+ status=status_or_headers)
+ headers = status_or_headers = None
+ else:
+ rv = self.response_class.force_type(rv, request.environ)
+
+ if status_or_headers is not None:
+ if isinstance(status_or_headers, string_types):
+ rv.status = status_or_headers
+ else:
+ rv.status_code = status_or_headers
+ if headers:
+ rv.headers.extend(headers)
+
+ return rv
+
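As an aside for readers of this vendored copy (illustrative only, not part of app.py), a minimal sketch of the return types the table above allows; the routes and header names are invented::

    from flask import Flask

    app = Flask(__name__)

    @app.route('/text')
    def text():
        # a plain string becomes the response body with the default status
        return 'hello'

    @app.route('/created')
    def created():
        # (body, status, headers) tuple, unpacked by make_response()
        return 'created', 201, {'X-Example': 'yes'}

    @app.route('/with-headers')
    def with_headers():
        # (body, headers) two-tuple; the status stays at the default 200
        return 'hello', {'X-Example': 'yes'}
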
+ def create_url_adapter(self, request):
+ """Creates a URL adapter for the given request. The URL adapter
+ is created at a point where the request context is not yet set up
+ so the request is passed explicitly.
+
+ .. versionadded:: 0.6
+
+ .. versionchanged:: 0.9
+ This can now also be called without a request object when the
+ URL adapter is created for the application context.
+ """
+ if request is not None:
+ return self.url_map.bind_to_environ(request.environ,
+ server_name=self.config['SERVER_NAME'])
+ # We need at the very least the server name to be set for this
+ # to work.
+ if self.config['SERVER_NAME'] is not None:
+ return self.url_map.bind(
+ self.config['SERVER_NAME'],
+ script_name=self.config['APPLICATION_ROOT'] or '/',
+ url_scheme=self.config['PREFERRED_URL_SCHEME'])
+
+ def inject_url_defaults(self, endpoint, values):
+ """Injects the URL defaults for the given endpoint directly into
+ the values dictionary passed. This is used internally and
+ automatically called on URL building.
+
+ .. versionadded:: 0.7
+ """
+ funcs = self.url_default_functions.get(None, ())
+ if '.' in endpoint:
+ bp = endpoint.rsplit('.', 1)[0]
+ funcs = chain(funcs, self.url_default_functions.get(bp, ()))
+ for func in funcs:
+ func(endpoint, values)
+
+ def handle_url_build_error(self, error, endpoint, values):
+ """Handle :class:`~werkzeug.routing.BuildError` on :meth:`url_for`.
+ """
+ exc_type, exc_value, tb = sys.exc_info()
+ for handler in self.url_build_error_handlers:
+ try:
+ rv = handler(error, endpoint, values)
+ if rv is not None:
+ return rv
+ except BuildError as e:
+ # make error available outside except block (py3)
+ error = e
+
+ # At this point we want to reraise the exception. If the error is
+ # still the same one we can reraise it with the original traceback,
+ # otherwise we raise it from here.
+ if error is exc_value:
+ reraise(exc_type, exc_value, tb)
+ raise error
+
+ def preprocess_request(self):
+ """Called before the actual request dispatching and will
+ call each :meth:`before_request` decorated function, passing no
+ arguments.
+ If any of these functions returns a value, it's handled as
+ if it was the return value from the view and further
+ request handling is stopped.
+
+ This also triggers the :meth:`url_value_preprocessor` functions before
+ the actual :meth:`before_request` functions are called.
+ """
+ bp = _request_ctx_stack.top.request.blueprint
+
+ funcs = self.url_value_preprocessors.get(None, ())
+ if bp is not None and bp in self.url_value_preprocessors:
+ funcs = chain(funcs, self.url_value_preprocessors[bp])
+ for func in funcs:
+ func(request.endpoint, request.view_args)
+
+ funcs = self.before_request_funcs.get(None, ())
+ if bp is not None and bp in self.before_request_funcs:
+ funcs = chain(funcs, self.before_request_funcs[bp])
+ for func in funcs:
+ rv = func()
+ if rv is not None:
+ return rv
+
+ def process_response(self, response):
+ """Can be overridden in order to modify the response object
+ before it's sent to the WSGI server. By default this will
+ call all the :meth:`after_request` decorated functions.
+
+ .. versionchanged:: 0.5
+ As of Flask 0.5 the functions registered for after request
+ execution are called in reverse order of registration.
+
+ :param response: a :attr:`response_class` object.
+ :return: a new response object or the same, has to be an
+ instance of :attr:`response_class`.
+ """
+ ctx = _request_ctx_stack.top
+ bp = ctx.request.blueprint
+ funcs = ctx._after_request_functions
+ if bp is not None and bp in self.after_request_funcs:
+ funcs = chain(funcs, reversed(self.after_request_funcs[bp]))
+ if None in self.after_request_funcs:
+ funcs = chain(funcs, reversed(self.after_request_funcs[None]))
+ for handler in funcs:
+ response = handler(response)
+ if not self.session_interface.is_null_session(ctx.session):
+ self.save_session(ctx.session, response)
+ return response
+
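For orientation, an illustrative sketch (not part of this file) of the hooks that ``preprocess_request`` and ``process_response`` drive from application code; the function and header names are made up::

    from flask import Flask, request

    app = Flask(__name__)

    @app.before_request
    def require_token():
        # returning a non-None value here short-circuits dispatching,
        # exactly as preprocess_request() describes
        if request.headers.get('X-Token') is None:
            return 'missing token', 400

    @app.after_request
    def stamp_response(response):
        # called via process_response(); must return a response object
        response.headers['X-Processed'] = 'yes'
        return response
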
+ def do_teardown_request(self, exc=_sentinel):
+ """Called after the actual request dispatching and will
+ call every as :meth:`teardown_request` decorated function. This is
+ not actually called by the :class:`Flask` object itself but is always
+ triggered when the request context is popped. That way we have a
+ tighter control over certain resources under testing environments.
+
+ .. versionchanged:: 0.9
+ Added the `exc` argument. Previously this was always using the
+ current exception information.
+ """
+ if exc is _sentinel:
+ exc = sys.exc_info()[1]
+ funcs = reversed(self.teardown_request_funcs.get(None, ()))
+ bp = _request_ctx_stack.top.request.blueprint
+ if bp is not None and bp in self.teardown_request_funcs:
+ funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))
+ for func in funcs:
+ func(exc)
+ request_tearing_down.send(self, exc=exc)
+
+ def do_teardown_appcontext(self, exc=_sentinel):
+ """Called when an application context is popped. This works pretty
+ much the same as :meth:`do_teardown_request` but for the application
+ context.
+
+ .. versionadded:: 0.9
+ """
+ if exc is _sentinel:
+ exc = sys.exc_info()[1]
+ for func in reversed(self.teardown_appcontext_funcs):
+ func(exc)
+ appcontext_tearing_down.send(self, exc=exc)
+
+ def app_context(self):
+ """Binds the application only. For as long as the application is bound
+ to the current context the :data:`flask.current_app` points to that
+ application. An application context is automatically created when a
+ request context is pushed if necessary.
+
+ Example usage::
+
+ with app.app_context():
+ ...
+
+ .. versionadded:: 0.9
+ """
+ return AppContext(self)
+
+ def request_context(self, environ):
+ """Creates a :class:`~flask.ctx.RequestContext` from the given
+ environment and binds it to the current context. This must be used in
+ combination with the ``with`` statement because the request is only bound
+ to the current context for the duration of the ``with`` block.
+
+ Example usage::
+
+ with app.request_context(environ):
+ do_something_with(request)
+
+ The object returned can also be used without the ``with`` statement
+ which is useful for working in the shell. The example above is
+ doing exactly the same as this code::
+
+ ctx = app.request_context(environ)
+ ctx.push()
+ try:
+ do_something_with(request)
+ finally:
+ ctx.pop()
+
+ .. versionchanged:: 0.3
+ Added support for non-with statement usage and ``with`` statement
+ is now passed the ctx object.
+
+ :param environ: a WSGI environment
+ """
+ return RequestContext(self, environ)
+
+ def test_request_context(self, *args, **kwargs):
+ """Creates a WSGI environment from the given values (see
+ :class:`werkzeug.test.EnvironBuilder` for more information, this
+ function accepts the same arguments).
+ """
+ from flask.testing import make_test_environ_builder
+ builder = make_test_environ_builder(self, *args, **kwargs)
+ try:
+ return self.request_context(builder.get_environ())
+ finally:
+ builder.close()
+
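A small usage sketch for ``test_request_context`` (assumed, not from this file); the route is a placeholder::

    from flask import Flask, request, url_for

    app = Flask(__name__)

    @app.route('/hello/<name>')
    def hello(name):
        return 'Hello %s' % name

    with app.test_request_context('/hello/world'):
        # request and url_for are bound for the duration of the block
        assert request.path == '/hello/world'
        assert url_for('hello', name='world') == '/hello/world'
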
+ def wsgi_app(self, environ, start_response):
+ """The actual WSGI application. This is not implemented in
+ `__call__` so that middlewares can be applied without losing a
+ reference to the class. So instead of doing this::
+
+ app = MyMiddleware(app)
+
+ It's a better idea to do this instead::
+
+ app.wsgi_app = MyMiddleware(app.wsgi_app)
+
+ Then you still have the original application object around and
+ can continue to call methods on it.
+
+ .. versionchanged:: 0.7
+ The behavior of the before and after request callbacks was changed
+ under error conditions and a new callback was added that will
+            always execute at the end of the request, independent of whether
+            an error occurred or not. See :ref:`callbacks-and-errors`.
+
+ :param environ: a WSGI environment
+ :param start_response: a callable accepting a status code,
+ a list of headers and an optional
+ exception context to start the response
+ """
+ ctx = self.request_context(environ)
+ ctx.push()
+ error = None
+ try:
+ try:
+ response = self.full_dispatch_request()
+ except Exception as e:
+ error = e
+ response = self.handle_exception(e)
+ except:
+ error = sys.exc_info()[1]
+ raise
+ return response(environ, start_response)
+ finally:
+ if self.should_ignore_error(error):
+ error = None
+ ctx.auto_pop(error)
+
+ def __call__(self, environ, start_response):
+ """Shortcut for :attr:`wsgi_app`."""
+ return self.wsgi_app(environ, start_response)
+
+ def __repr__(self):
+ return '<%s %r>' % (
+ self.__class__.__name__,
+ self.name,
+ )
diff --git a/appWSM/lib/python2.7/site-packages/flask/blueprints.py b/appWSM/lib/python2.7/site-packages/flask/blueprints.py
new file mode 100644
index 0000000000000000000000000000000000000000..586a1b0b113fc493a7afcbcaa9b8968099fc8bd2
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/flask/blueprints.py
@@ -0,0 +1,413 @@
+# -*- coding: utf-8 -*-
+"""
+ flask.blueprints
+ ~~~~~~~~~~~~~~~~
+
+ Blueprints are the recommended way to implement larger or more
+ pluggable applications in Flask 0.7 and later.
+
+ :copyright: (c) 2015 by Armin Ronacher.
+ :license: BSD, see LICENSE for more details.
+"""
+from functools import update_wrapper
+
+from .helpers import _PackageBoundObject, _endpoint_from_view_func
+
+
+class BlueprintSetupState(object):
+ """Temporary holder object for registering a blueprint with the
+ application. An instance of this class is created by the
+ :meth:`~flask.Blueprint.make_setup_state` method and later passed
+ to all register callback functions.
+ """
+
+ def __init__(self, blueprint, app, options, first_registration):
+ #: a reference to the current application
+ self.app = app
+
+ #: a reference to the blueprint that created this setup state.
+ self.blueprint = blueprint
+
+ #: a dictionary with all options that were passed to the
+ #: :meth:`~flask.Flask.register_blueprint` method.
+ self.options = options
+
+ #: as blueprints can be registered multiple times with the
+ #: application and not everything wants to be registered
+ #: multiple times on it, this attribute can be used to figure
+ #: out if the blueprint was registered in the past already.
+ self.first_registration = first_registration
+
+ subdomain = self.options.get('subdomain')
+ if subdomain is None:
+ subdomain = self.blueprint.subdomain
+
+ #: The subdomain that the blueprint should be active for, ``None``
+ #: otherwise.
+ self.subdomain = subdomain
+
+ url_prefix = self.options.get('url_prefix')
+ if url_prefix is None:
+ url_prefix = self.blueprint.url_prefix
+
+ #: The prefix that should be used for all URLs defined on the
+ #: blueprint.
+ self.url_prefix = url_prefix
+
+ #: A dictionary with URL defaults that is added to each and every
+ #: URL that was defined with the blueprint.
+ self.url_defaults = dict(self.blueprint.url_values_defaults)
+ self.url_defaults.update(self.options.get('url_defaults', ()))
+
+ def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
+ """A helper method to register a rule (and optionally a view function)
+ to the application. The endpoint is automatically prefixed with the
+ blueprint's name.
+ """
+ if self.url_prefix:
+ rule = self.url_prefix + rule
+ options.setdefault('subdomain', self.subdomain)
+ if endpoint is None:
+ endpoint = _endpoint_from_view_func(view_func)
+ defaults = self.url_defaults
+ if 'defaults' in options:
+ defaults = dict(defaults, **options.pop('defaults'))
+ self.app.add_url_rule(rule, '%s.%s' % (self.blueprint.name, endpoint),
+ view_func, defaults=defaults, **options)
+
+
+class Blueprint(_PackageBoundObject):
+ """Represents a blueprint. A blueprint is an object that records
+ functions that will be called with the
+ :class:`~flask.blueprints.BlueprintSetupState` later to register functions
+ or other things on the main application. See :ref:`blueprints` for more
+ information.
+
+ .. versionadded:: 0.7
+ """
+
+ warn_on_modifications = False
+ _got_registered_once = False
+
+ def __init__(self, name, import_name, static_folder=None,
+ static_url_path=None, template_folder=None,
+ url_prefix=None, subdomain=None, url_defaults=None,
+ root_path=None):
+ _PackageBoundObject.__init__(self, import_name, template_folder,
+ root_path=root_path)
+ self.name = name
+ self.url_prefix = url_prefix
+ self.subdomain = subdomain
+ self.static_folder = static_folder
+ self.static_url_path = static_url_path
+ self.deferred_functions = []
+ if url_defaults is None:
+ url_defaults = {}
+ self.url_values_defaults = url_defaults
+
+ def record(self, func):
+ """Registers a function that is called when the blueprint is
+ registered on the application. This function is called with the
+ state as argument as returned by the :meth:`make_setup_state`
+ method.
+ """
+ if self._got_registered_once and self.warn_on_modifications:
+ from warnings import warn
+ warn(Warning('The blueprint was already registered once '
+ 'but is getting modified now. These changes '
+ 'will not show up.'))
+ self.deferred_functions.append(func)
+
+ def record_once(self, func):
+ """Works like :meth:`record` but wraps the function in another
+ function that will ensure the function is only called once. If the
+ blueprint is registered a second time on the application, the
+ function passed is not called.
+ """
+ def wrapper(state):
+ if state.first_registration:
+ func(state)
+ return self.record(update_wrapper(wrapper, func))
+
+ def make_setup_state(self, app, options, first_registration=False):
+ """Creates an instance of :meth:`~flask.blueprints.BlueprintSetupState`
+ object that is later passed to the register callback functions.
+ Subclasses can override this to return a subclass of the setup state.
+ """
+ return BlueprintSetupState(self, app, options, first_registration)
+
+ def register(self, app, options, first_registration=False):
+ """Called by :meth:`Flask.register_blueprint` to register a blueprint
+ on the application. This can be overridden to customize the register
+ behavior. Keyword arguments from
+ :func:`~flask.Flask.register_blueprint` are directly forwarded to this
+ method in the `options` dictionary.
+ """
+ self._got_registered_once = True
+ state = self.make_setup_state(app, options, first_registration)
+ if self.has_static_folder:
+            state.add_url_rule(self.static_url_path + '/<path:filename>',
+ view_func=self.send_static_file,
+ endpoint='static')
+
+ for deferred in self.deferred_functions:
+ deferred(state)
+
+ def route(self, rule, **options):
+ """Like :meth:`Flask.route` but for a blueprint. The endpoint for the
+ :func:`url_for` function is prefixed with the name of the blueprint.
+ """
+ def decorator(f):
+ endpoint = options.pop("endpoint", f.__name__)
+ self.add_url_rule(rule, endpoint, f, **options)
+ return f
+ return decorator
+
+ def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
+ """Like :meth:`Flask.add_url_rule` but for a blueprint. The endpoint for
+ the :func:`url_for` function is prefixed with the name of the blueprint.
+ """
+ if endpoint:
+ assert '.' not in endpoint, "Blueprint endpoints should not contain dots"
+ self.record(lambda s:
+ s.add_url_rule(rule, endpoint, view_func, **options))
+
+ def endpoint(self, endpoint):
+ """Like :meth:`Flask.endpoint` but for a blueprint. This does not
+        prefix the endpoint with the blueprint name; this has to be done
+        explicitly by the user of this method. If the endpoint is prefixed
+        with a `.` it will be registered to the current blueprint, otherwise
+        it's an application-independent endpoint.
+ """
+ def decorator(f):
+ def register_endpoint(state):
+ state.app.view_functions[endpoint] = f
+ self.record_once(register_endpoint)
+ return f
+ return decorator
+
+ def app_template_filter(self, name=None):
+ """Register a custom template filter, available application wide. Like
+ :meth:`Flask.template_filter` but for a blueprint.
+
+ :param name: the optional name of the filter, otherwise the
+ function name will be used.
+ """
+ def decorator(f):
+ self.add_app_template_filter(f, name=name)
+ return f
+ return decorator
+
+ def add_app_template_filter(self, f, name=None):
+ """Register a custom template filter, available application wide. Like
+ :meth:`Flask.add_template_filter` but for a blueprint. Works exactly
+ like the :meth:`app_template_filter` decorator.
+
+ :param name: the optional name of the filter, otherwise the
+ function name will be used.
+ """
+ def register_template(state):
+ state.app.jinja_env.filters[name or f.__name__] = f
+ self.record_once(register_template)
+
+ def app_template_test(self, name=None):
+ """Register a custom template test, available application wide. Like
+ :meth:`Flask.template_test` but for a blueprint.
+
+ .. versionadded:: 0.10
+
+ :param name: the optional name of the test, otherwise the
+ function name will be used.
+ """
+ def decorator(f):
+ self.add_app_template_test(f, name=name)
+ return f
+ return decorator
+
+ def add_app_template_test(self, f, name=None):
+ """Register a custom template test, available application wide. Like
+ :meth:`Flask.add_template_test` but for a blueprint. Works exactly
+ like the :meth:`app_template_test` decorator.
+
+ .. versionadded:: 0.10
+
+ :param name: the optional name of the test, otherwise the
+ function name will be used.
+ """
+ def register_template(state):
+ state.app.jinja_env.tests[name or f.__name__] = f
+ self.record_once(register_template)
+
+ def app_template_global(self, name=None):
+ """Register a custom template global, available application wide. Like
+ :meth:`Flask.template_global` but for a blueprint.
+
+ .. versionadded:: 0.10
+
+ :param name: the optional name of the global, otherwise the
+ function name will be used.
+ """
+ def decorator(f):
+ self.add_app_template_global(f, name=name)
+ return f
+ return decorator
+
+ def add_app_template_global(self, f, name=None):
+ """Register a custom template global, available application wide. Like
+ :meth:`Flask.add_template_global` but for a blueprint. Works exactly
+ like the :meth:`app_template_global` decorator.
+
+ .. versionadded:: 0.10
+
+ :param name: the optional name of the global, otherwise the
+ function name will be used.
+ """
+ def register_template(state):
+ state.app.jinja_env.globals[name or f.__name__] = f
+ self.record_once(register_template)
+
+ def before_request(self, f):
+ """Like :meth:`Flask.before_request` but for a blueprint. This function
+ is only executed before each request that is handled by a function of
+ that blueprint.
+ """
+ self.record_once(lambda s: s.app.before_request_funcs
+ .setdefault(self.name, []).append(f))
+ return f
+
+ def before_app_request(self, f):
+ """Like :meth:`Flask.before_request`. Such a function is executed
+ before each request, even if outside of a blueprint.
+ """
+ self.record_once(lambda s: s.app.before_request_funcs
+ .setdefault(None, []).append(f))
+ return f
+
+ def before_app_first_request(self, f):
+ """Like :meth:`Flask.before_first_request`. Such a function is
+ executed before the first request to the application.
+ """
+ self.record_once(lambda s: s.app.before_first_request_funcs.append(f))
+ return f
+
+ def after_request(self, f):
+ """Like :meth:`Flask.after_request` but for a blueprint. This function
+ is only executed after each request that is handled by a function of
+ that blueprint.
+ """
+ self.record_once(lambda s: s.app.after_request_funcs
+ .setdefault(self.name, []).append(f))
+ return f
+
+ def after_app_request(self, f):
+ """Like :meth:`Flask.after_request` but for a blueprint. Such a function
+ is executed after each request, even if outside of the blueprint.
+ """
+ self.record_once(lambda s: s.app.after_request_funcs
+ .setdefault(None, []).append(f))
+ return f
+
+ def teardown_request(self, f):
+ """Like :meth:`Flask.teardown_request` but for a blueprint. This
+ function is only executed when tearing down requests handled by a
+ function of that blueprint. Teardown request functions are executed
+ when the request context is popped, even when no actual request was
+ performed.
+ """
+ self.record_once(lambda s: s.app.teardown_request_funcs
+ .setdefault(self.name, []).append(f))
+ return f
+
+ def teardown_app_request(self, f):
+ """Like :meth:`Flask.teardown_request` but for a blueprint. Such a
+ function is executed when tearing down each request, even if outside of
+ the blueprint.
+ """
+ self.record_once(lambda s: s.app.teardown_request_funcs
+ .setdefault(None, []).append(f))
+ return f
+
+ def context_processor(self, f):
+ """Like :meth:`Flask.context_processor` but for a blueprint. This
+ function is only executed for requests handled by a blueprint.
+ """
+ self.record_once(lambda s: s.app.template_context_processors
+ .setdefault(self.name, []).append(f))
+ return f
+
+ def app_context_processor(self, f):
+ """Like :meth:`Flask.context_processor` but for a blueprint. Such a
+        function is executed for each request, even if outside of the blueprint.
+ """
+ self.record_once(lambda s: s.app.template_context_processors
+ .setdefault(None, []).append(f))
+ return f
+
+ def app_errorhandler(self, code):
+ """Like :meth:`Flask.errorhandler` but for a blueprint. This
+ handler is used for all requests, even if outside of the blueprint.
+ """
+ def decorator(f):
+ self.record_once(lambda s: s.app.errorhandler(code)(f))
+ return f
+ return decorator
+
+ def url_value_preprocessor(self, f):
+ """Registers a function as URL value preprocessor for this
+ blueprint. It's called before the view functions are called and
+ can modify the url values provided.
+ """
+ self.record_once(lambda s: s.app.url_value_preprocessors
+ .setdefault(self.name, []).append(f))
+ return f
+
+ def url_defaults(self, f):
+ """Callback function for URL defaults for this blueprint. It's called
+ with the endpoint and values and should update the values passed
+ in place.
+ """
+ self.record_once(lambda s: s.app.url_default_functions
+ .setdefault(self.name, []).append(f))
+ return f
+
+ def app_url_value_preprocessor(self, f):
+ """Same as :meth:`url_value_preprocessor` but application wide.
+ """
+ self.record_once(lambda s: s.app.url_value_preprocessors
+ .setdefault(None, []).append(f))
+ return f
+
+ def app_url_defaults(self, f):
+ """Same as :meth:`url_defaults` but application wide.
+ """
+ self.record_once(lambda s: s.app.url_default_functions
+ .setdefault(None, []).append(f))
+ return f
+
+ def errorhandler(self, code_or_exception):
+ """Registers an error handler that becomes active for this blueprint
+ only. Please be aware that routing does not happen local to a
+ blueprint so an error handler for 404 usually is not handled by
+ a blueprint unless it is caused inside a view function. Another
+ special case is the 500 internal server error which is always looked
+ up from the application.
+
+ Otherwise works as the :meth:`~flask.Flask.errorhandler` decorator
+ of the :class:`~flask.Flask` object.
+ """
+ def decorator(f):
+ self.record_once(lambda s: s.app._register_error_handler(
+ self.name, code_or_exception, f))
+ return f
+ return decorator
+
+ def register_error_handler(self, code_or_exception, f):
+ """Non-decorator version of the :meth:`errorhandler` error attach
+ function, akin to the :meth:`~flask.Flask.register_error_handler`
+ application-wide function of the :class:`~flask.Flask` object but
+ for error handlers limited to this blueprint.
+
+ .. versionadded:: 0.11
+ """
+ self.record_once(lambda s: s.app._register_error_handler(
+ self.name, code_or_exception, f))
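To see how the pieces above fit together, a brief sketch (not part of blueprints.py) of defining and registering a blueprint; the names and prefix are invented::

    from flask import Blueprint, Flask

    admin = Blueprint('admin', __name__, url_prefix='/admin')

    @admin.route('/users')
    def users():
        # registered on the app as endpoint 'admin.users', served at /admin/users
        return 'user list'

    app = Flask(__name__)
    app.register_blueprint(admin)
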
diff --git a/appWSM/lib/python2.7/site-packages/flask/cli.py b/appWSM/lib/python2.7/site-packages/flask/cli.py
new file mode 100644
index 0000000000000000000000000000000000000000..074ee7687b74cd43e4354245a4d9913d98cc1b86
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/flask/cli.py
@@ -0,0 +1,517 @@
+# -*- coding: utf-8 -*-
+"""
+ flask.cli
+ ~~~~~~~~~
+
+ A simple command line application to run flask apps.
+
+ :copyright: (c) 2015 by Armin Ronacher.
+ :license: BSD, see LICENSE for more details.
+"""
+
+import os
+import sys
+from threading import Lock, Thread
+from functools import update_wrapper
+
+import click
+
+from ._compat import iteritems, reraise
+from .helpers import get_debug_flag
+from . import __version__
+
+class NoAppException(click.UsageError):
+ """Raised if an application cannot be found or loaded."""
+
+
+def find_best_app(module):
+ """Given a module instance this tries to find the best possible
+ application in the module or raises an exception.
+ """
+ from . import Flask
+
+ # Search for the most common names first.
+ for attr_name in 'app', 'application':
+ app = getattr(module, attr_name, None)
+ if app is not None and isinstance(app, Flask):
+ return app
+
+ # Otherwise find the only object that is a Flask instance.
+ matches = [v for k, v in iteritems(module.__dict__)
+ if isinstance(v, Flask)]
+
+ if len(matches) == 1:
+ return matches[0]
+ raise NoAppException('Failed to find application in module "%s". Are '
+ 'you sure it contains a Flask application? Maybe '
+ 'you wrapped it in a WSGI middleware or you are '
+ 'using a factory function.' % module.__name__)
+
+
+def prepare_exec_for_file(filename):
+ """Given a filename this will try to calculate the python path, add it
+ to the search path and return the actual module name that is expected.
+ """
+ module = []
+
+ # Chop off file extensions or package markers
+ if os.path.split(filename)[1] == '__init__.py':
+ filename = os.path.dirname(filename)
+ elif filename.endswith('.py'):
+ filename = filename[:-3]
+ else:
+ raise NoAppException('The file provided (%s) does exist but is not a '
+ 'valid Python file. This means that it cannot '
+ 'be used as application. Please change the '
+ 'extension to .py' % filename)
+ filename = os.path.realpath(filename)
+
+ dirpath = filename
+ while 1:
+ dirpath, extra = os.path.split(dirpath)
+ module.append(extra)
+ if not os.path.isfile(os.path.join(dirpath, '__init__.py')):
+ break
+
+ sys.path.insert(0, dirpath)
+ return '.'.join(module[::-1])
+
+
+def locate_app(app_id):
+ """Attempts to locate the application."""
+ __traceback_hide__ = True
+ if ':' in app_id:
+ module, app_obj = app_id.split(':', 1)
+ else:
+ module = app_id
+ app_obj = None
+
+ try:
+ __import__(module)
+ except ImportError:
+ # Reraise the ImportError if it occurred within the imported module.
+ # Determine this by checking whether the trace has a depth > 1.
+ if sys.exc_info()[-1].tb_next:
+ raise
+ else:
+ raise NoAppException('The file/path provided (%s) does not appear'
+ ' to exist. Please verify the path is '
+ 'correct. If app is not on PYTHONPATH, '
+ 'ensure the extension is .py' % module)
+
+ mod = sys.modules[module]
+ if app_obj is None:
+ app = find_best_app(mod)
+ else:
+ app = getattr(mod, app_obj, None)
+ if app is None:
+ raise RuntimeError('Failed to find application in module "%s"'
+ % module)
+
+ return app
+
+
+def find_default_import_path():
+ app = os.environ.get('FLASK_APP')
+ if app is None:
+ return
+ if os.path.isfile(app):
+ return prepare_exec_for_file(app)
+ return app
+
+
+def get_version(ctx, param, value):
+ if not value or ctx.resilient_parsing:
+ return
+ message = 'Flask %(version)s\nPython %(python_version)s'
+ click.echo(message % {
+ 'version': __version__,
+ 'python_version': sys.version,
+ }, color=ctx.color)
+ ctx.exit()
+
+version_option = click.Option(['--version'],
+ help='Show the flask version',
+ expose_value=False,
+ callback=get_version,
+ is_flag=True, is_eager=True)
+
+class DispatchingApp(object):
+ """Special application that dispatches to a Flask application which
+ is imported by name in a background thread. If an error happens
+ it is recorded and shown as part of the WSGI handling which in case
+ of the Werkzeug debugger means that it shows up in the browser.
+ """
+
+ def __init__(self, loader, use_eager_loading=False):
+ self.loader = loader
+ self._app = None
+ self._lock = Lock()
+ self._bg_loading_exc_info = None
+ if use_eager_loading:
+ self._load_unlocked()
+ else:
+ self._load_in_background()
+
+ def _load_in_background(self):
+ def _load_app():
+ __traceback_hide__ = True
+ with self._lock:
+ try:
+ self._load_unlocked()
+ except Exception:
+ self._bg_loading_exc_info = sys.exc_info()
+ t = Thread(target=_load_app, args=())
+ t.start()
+
+ def _flush_bg_loading_exception(self):
+ __traceback_hide__ = True
+ exc_info = self._bg_loading_exc_info
+ if exc_info is not None:
+ self._bg_loading_exc_info = None
+ reraise(*exc_info)
+
+ def _load_unlocked(self):
+ __traceback_hide__ = True
+ self._app = rv = self.loader()
+ self._bg_loading_exc_info = None
+ return rv
+
+ def __call__(self, environ, start_response):
+ __traceback_hide__ = True
+ if self._app is not None:
+ return self._app(environ, start_response)
+ self._flush_bg_loading_exception()
+ with self._lock:
+ if self._app is not None:
+ rv = self._app
+ else:
+ rv = self._load_unlocked()
+ return rv(environ, start_response)
+
+
+class ScriptInfo(object):
+ """Help object to deal with Flask applications. This is usually not
+ necessary to interface with as it's used internally in the dispatching
+ to click. In future versions of Flask this object will most likely play
+ a bigger role. Typically it's created automatically by the
+ :class:`FlaskGroup` but you can also manually create it and pass it
+    onwards as a click object.
+ """
+
+ def __init__(self, app_import_path=None, create_app=None):
+ if create_app is None:
+ if app_import_path is None:
+ app_import_path = find_default_import_path()
+ self.app_import_path = app_import_path
+ else:
+ app_import_path = None
+
+ #: Optionally the import path for the Flask application.
+ self.app_import_path = app_import_path
+ #: Optionally a function that is passed the script info to create
+ #: the instance of the application.
+ self.create_app = create_app
+ #: A dictionary with arbitrary data that can be associated with
+ #: this script info.
+ self.data = {}
+ self._loaded_app = None
+
+ def load_app(self):
+ """Loads the Flask app (if not yet loaded) and returns it. Calling
+ this multiple times will just result in the already loaded app to
+ be returned.
+ """
+ __traceback_hide__ = True
+ if self._loaded_app is not None:
+ return self._loaded_app
+ if self.create_app is not None:
+ rv = self.create_app(self)
+ else:
+ if not self.app_import_path:
+ raise NoAppException(
+ 'Could not locate Flask application. You did not provide '
+ 'the FLASK_APP environment variable.\n\nFor more '
+ 'information see '
+ 'http://flask.pocoo.org/docs/latest/quickstart/')
+ rv = locate_app(self.app_import_path)
+ debug = get_debug_flag()
+ if debug is not None:
+ rv.debug = debug
+ self._loaded_app = rv
+ return rv
+
+
+pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True)
+
+
+def with_appcontext(f):
+ """Wraps a callback so that it's guaranteed to be executed with the
+ script's application context. If callbacks are registered directly
+ to the ``app.cli`` object then they are wrapped with this function
+ by default unless it's disabled.
+ """
+ @click.pass_context
+ def decorator(__ctx, *args, **kwargs):
+ with __ctx.ensure_object(ScriptInfo).load_app().app_context():
+ return __ctx.invoke(f, *args, **kwargs)
+ return update_wrapper(decorator, f)
+
+
+class AppGroup(click.Group):
+ """This works similar to a regular click :class:`~click.Group` but it
+ changes the behavior of the :meth:`command` decorator so that it
+ automatically wraps the functions in :func:`with_appcontext`.
+
+ Not to be confused with :class:`FlaskGroup`.
+ """
+
+ def command(self, *args, **kwargs):
+ """This works exactly like the method of the same name on a regular
+ :class:`click.Group` but it wraps callbacks in :func:`with_appcontext`
+ unless it's disabled by passing ``with_appcontext=False``.
+ """
+ wrap_for_ctx = kwargs.pop('with_appcontext', True)
+ def decorator(f):
+ if wrap_for_ctx:
+ f = with_appcontext(f)
+ return click.Group.command(self, *args, **kwargs)(f)
+ return decorator
+
+ def group(self, *args, **kwargs):
+ """This works exactly like the method of the same name on a regular
+ :class:`click.Group` but it defaults the group class to
+ :class:`AppGroup`.
+ """
+ kwargs.setdefault('cls', AppGroup)
+ return click.Group.group(self, *args, **kwargs)
+
+
+class FlaskGroup(AppGroup):
+ """Special subclass of the :class:`AppGroup` group that supports
+ loading more commands from the configured Flask app. Normally a
+ developer does not have to interface with this class but there are
+ some very advanced use cases for which it makes sense to create an
+ instance of this.
+
+    For information on why this is useful, see :ref:`custom-scripts`.
+
+    :param add_default_commands: if this is True then the default run and
+                                 shell commands will be added.
+ :param add_version_option: adds the ``--version`` option.
+ :param create_app: an optional callback that is passed the script info
+ and returns the loaded app.
+ """
+
+ def __init__(self, add_default_commands=True, create_app=None,
+ add_version_option=True, **extra):
+ params = list(extra.pop('params', None) or ())
+
+ if add_version_option:
+ params.append(version_option)
+
+ AppGroup.__init__(self, params=params, **extra)
+ self.create_app = create_app
+
+ if add_default_commands:
+ self.add_command(run_command)
+ self.add_command(shell_command)
+
+ self._loaded_plugin_commands = False
+
+ def _load_plugin_commands(self):
+ if self._loaded_plugin_commands:
+ return
+ try:
+ import pkg_resources
+ except ImportError:
+ self._loaded_plugin_commands = True
+ return
+
+ for ep in pkg_resources.iter_entry_points('flask.commands'):
+ self.add_command(ep.load(), ep.name)
+ self._loaded_plugin_commands = True
+
+ def get_command(self, ctx, name):
+ self._load_plugin_commands()
+
+ # We load built-in commands first as these should always be the
+ # same no matter what the app does. If the app does want to
+ # override this it needs to make a custom instance of this group
+ # and not attach the default commands.
+ #
+ # This also means that the script stays functional in case the
+ # application completely fails.
+ rv = AppGroup.get_command(self, ctx, name)
+ if rv is not None:
+ return rv
+
+ info = ctx.ensure_object(ScriptInfo)
+ try:
+ rv = info.load_app().cli.get_command(ctx, name)
+ if rv is not None:
+ return rv
+ except NoAppException:
+ pass
+
+ def list_commands(self, ctx):
+ self._load_plugin_commands()
+
+ # The commands available is the list of both the application (if
+ # available) plus the builtin commands.
+ rv = set(click.Group.list_commands(self, ctx))
+ info = ctx.ensure_object(ScriptInfo)
+ try:
+ rv.update(info.load_app().cli.list_commands(ctx))
+ except Exception:
+ # Here we intentionally swallow all exceptions as we don't
+ # want the help page to break if the app does not exist.
+ # If someone attempts to use the command we try to create
+ # the app again and this will give us the error.
+ pass
+ return sorted(rv)
+
+ def main(self, *args, **kwargs):
+ obj = kwargs.get('obj')
+ if obj is None:
+ obj = ScriptInfo(create_app=self.create_app)
+ kwargs['obj'] = obj
+ kwargs.setdefault('auto_envvar_prefix', 'FLASK')
+ return AppGroup.main(self, *args, **kwargs)
+
+
+@click.command('run', short_help='Runs a development server.')
+@click.option('--host', '-h', default='127.0.0.1',
+ help='The interface to bind to.')
+@click.option('--port', '-p', default=5000,
+ help='The port to bind to.')
+@click.option('--reload/--no-reload', default=None,
+ help='Enable or disable the reloader. By default the reloader '
+ 'is active if debug is enabled.')
+@click.option('--debugger/--no-debugger', default=None,
+ help='Enable or disable the debugger. By default the debugger '
+ 'is active if debug is enabled.')
+@click.option('--eager-loading/--lazy-loader', default=None,
+ help='Enable or disable eager loading. By default eager '
+ 'loading is enabled if the reloader is disabled.')
+@click.option('--with-threads/--without-threads', default=False,
+ help='Enable or disable multithreading.')
+@pass_script_info
+def run_command(info, host, port, reload, debugger, eager_loading,
+ with_threads):
+ """Runs a local development server for the Flask application.
+
+ This local server is recommended for development purposes only but it
+ can also be used for simple intranet deployments. By default it will
+ not support any sort of concurrency at all to simplify debugging. This
+ can be changed with the --with-threads option which will enable basic
+ multithreading.
+
+ The reloader and debugger are by default enabled if the debug flag of
+ Flask is enabled and disabled otherwise.
+ """
+ from werkzeug.serving import run_simple
+
+ debug = get_debug_flag()
+ if reload is None:
+ reload = bool(debug)
+ if debugger is None:
+ debugger = bool(debug)
+ if eager_loading is None:
+ eager_loading = not reload
+
+ app = DispatchingApp(info.load_app, use_eager_loading=eager_loading)
+
+ # Extra startup messages. This depends a bit on Werkzeug internals to
+ # not double execute when the reloader kicks in.
+ if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
+ # If we have an import path we can print it out now which can help
+ # people understand what's being served. If we do not have an
+ # import path because the app was loaded through a callback then
+ # we won't print anything.
+ if info.app_import_path is not None:
+ print(' * Serving Flask app "%s"' % info.app_import_path)
+ if debug is not None:
+ print(' * Forcing debug mode %s' % (debug and 'on' or 'off'))
+
+ run_simple(host, port, app, use_reloader=reload,
+ use_debugger=debugger, threaded=with_threads)
+
+
+@click.command('shell', short_help='Runs a shell in the app context.')
+@with_appcontext
+def shell_command():
+ """Runs an interactive Python shell in the context of a given
+ Flask application. The application will populate the default
+    namespace of this shell according to its configuration.
+
+ This is useful for executing small snippets of management code
+    without having to manually configure the application.
+ """
+ import code
+ from flask.globals import _app_ctx_stack
+ app = _app_ctx_stack.top.app
+ banner = 'Python %s on %s\nApp: %s%s\nInstance: %s' % (
+ sys.version,
+ sys.platform,
+ app.import_name,
+ app.debug and ' [debug]' or '',
+ app.instance_path,
+ )
+ ctx = {}
+
+ # Support the regular Python interpreter startup script if someone
+ # is using it.
+ startup = os.environ.get('PYTHONSTARTUP')
+ if startup and os.path.isfile(startup):
+ with open(startup, 'r') as f:
+ eval(compile(f.read(), startup, 'exec'), ctx)
+
+ ctx.update(app.make_shell_context())
+
+ code.interact(banner=banner, local=ctx)
+
+
+cli = FlaskGroup(help="""\
+This shell command acts as general utility script for Flask applications.
+
+It loads the application configured (through the FLASK_APP environment
+variable) and then provides commands either provided by the application or
+Flask itself.
+
+The most useful commands are the "run" and "shell" commands.
+
+Example usage:
+
+\b
+ %(prefix)s%(cmd)s FLASK_APP=hello.py
+ %(prefix)s%(cmd)s FLASK_DEBUG=1
+ %(prefix)sflask run
+""" % {
+ 'cmd': os.name == 'posix' and 'export' or 'set',
+ 'prefix': os.name == 'posix' and '$ ' or '',
+})
+
+
+def main(as_module=False):
+ this_module = __package__ + '.cli'
+ args = sys.argv[1:]
+
+ if as_module:
+ if sys.version_info >= (2, 7):
+ name = 'python -m ' + this_module.rsplit('.', 1)[0]
+ else:
+ name = 'python -m ' + this_module
+
+ # This module is always executed as "python -m flask.run" and as such
+ # we need to ensure that we restore the actual command line so that
+ # the reloader can properly operate.
+ sys.argv = ['-m', this_module] + sys.argv[1:]
+ else:
+ name = None
+
+ cli.main(args=args, prog_name=name)
+
+
+if __name__ == '__main__':
+ main(as_module=True)
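As a usage note (not part of cli.py), a hedged sketch of adding a custom command to ``app.cli``, which is an :class:`AppGroup` and therefore wraps the callback in :func:`with_appcontext` as described above; the file and command names are made up::

    # hello.py -- invoked as: FLASK_APP=hello.py flask initdb
    from flask import Flask, current_app

    app = Flask(__name__)

    @app.cli.command()
    def initdb():
        """Example command; runs inside an application context."""
        print('initializing the database for %s' % current_app.name)
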
diff --git a/appWSM/lib/python2.7/site-packages/flask/config.py b/appWSM/lib/python2.7/site-packages/flask/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..697add71916da7df72ba2b324adc944375302114
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/flask/config.py
@@ -0,0 +1,263 @@
+# -*- coding: utf-8 -*-
+"""
+ flask.config
+ ~~~~~~~~~~~~
+
+ Implements the configuration related objects.
+
+ :copyright: (c) 2015 by Armin Ronacher.
+ :license: BSD, see LICENSE for more details.
+"""
+
+import os
+import types
+import errno
+
+from werkzeug.utils import import_string
+from ._compat import string_types, iteritems
+from . import json
+
+
+class ConfigAttribute(object):
+ """Makes an attribute forward to the config"""
+
+ def __init__(self, name, get_converter=None):
+ self.__name__ = name
+ self.get_converter = get_converter
+
+ def __get__(self, obj, type=None):
+ if obj is None:
+ return self
+ rv = obj.config[self.__name__]
+ if self.get_converter is not None:
+ rv = self.get_converter(rv)
+ return rv
+
+ def __set__(self, obj, value):
+ obj.config[self.__name__] = value
+
+
+class Config(dict):
+ """Works exactly like a dict but provides ways to fill it from files
+ or special dictionaries. There are two common patterns to populate the
+ config.
+
+ Either you can fill the config from a config file::
+
+ app.config.from_pyfile('yourconfig.cfg')
+
+ Or alternatively you can define the configuration options in the
+ module that calls :meth:`from_object` or provide an import path to
+ a module that should be loaded. It is also possible to tell it to
+ use the same module and with that provide the configuration values
+ just before the call::
+
+ DEBUG = True
+ SECRET_KEY = 'development key'
+ app.config.from_object(__name__)
+
+ In both cases (loading from any Python file or loading from modules),
+ only uppercase keys are added to the config. This makes it possible to use
+ lowercase values in the config file for temporary values that are not added
+ to the config or to define the config keys in the same file that implements
+ the application.
+
+ Probably the most interesting way to load configurations is from an
+ environment variable pointing to a file::
+
+ app.config.from_envvar('YOURAPPLICATION_SETTINGS')
+
+ In this case before launching the application you have to set this
+ environment variable to the file you want to use. On Linux and OS X
+ use the export statement::
+
+ export YOURAPPLICATION_SETTINGS='/path/to/config/file'
+
+    On Windows use `set` instead.
+
+ :param root_path: path to which files are read relative from. When the
+ config object is created by the application, this is
+ the application's :attr:`~flask.Flask.root_path`.
+ :param defaults: an optional dictionary of default values
+ """
+
+ def __init__(self, root_path, defaults=None):
+ dict.__init__(self, defaults or {})
+ self.root_path = root_path
+
+ def from_envvar(self, variable_name, silent=False):
+ """Loads a configuration from an environment variable pointing to
+ a configuration file. This is basically just a shortcut with nicer
+ error messages for this line of code::
+
+ app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS'])
+
+ :param variable_name: name of the environment variable
+ :param silent: set to ``True`` if you want silent failure for missing
+ files.
+ :return: bool. ``True`` if able to load config, ``False`` otherwise.
+ """
+ rv = os.environ.get(variable_name)
+ if not rv:
+ if silent:
+ return False
+ raise RuntimeError('The environment variable %r is not set '
+ 'and as such configuration could not be '
+ 'loaded. Set this variable and make it '
+ 'point to a configuration file' %
+ variable_name)
+ return self.from_pyfile(rv, silent=silent)
+
+ def from_pyfile(self, filename, silent=False):
+ """Updates the values in the config from a Python file. This function
+ behaves as if the file was imported as module with the
+ :meth:`from_object` function.
+
+ :param filename: the filename of the config. This can either be an
+ absolute filename or a filename relative to the
+ root path.
+ :param silent: set to ``True`` if you want silent failure for missing
+ files.
+
+ .. versionadded:: 0.7
+ `silent` parameter.
+ """
+ filename = os.path.join(self.root_path, filename)
+ d = types.ModuleType('config')
+ d.__file__ = filename
+ try:
+ with open(filename, mode='rb') as config_file:
+ exec(compile(config_file.read(), filename, 'exec'), d.__dict__)
+ except IOError as e:
+ if silent and e.errno in (errno.ENOENT, errno.EISDIR):
+ return False
+ e.strerror = 'Unable to load configuration file (%s)' % e.strerror
+ raise
+ self.from_object(d)
+ return True
+
+ def from_object(self, obj):
+ """Updates the values from the given object. An object can be of one
+ of the following two types:
+
+ - a string: in this case the object with that name will be imported
+ - an actual object reference: that object is used directly
+
+ Objects are usually either modules or classes. :meth:`from_object`
+ loads only the uppercase attributes of the module/class. A ``dict``
+ object will not work with :meth:`from_object` because the keys of a
+ ``dict`` are not attributes of the ``dict`` class.
+
+ Example of module-based configuration::
+
+ app.config.from_object('yourapplication.default_config')
+ from yourapplication import default_config
+ app.config.from_object(default_config)
+
+ You should not use this function to load the actual configuration but
+ rather configuration defaults. The actual config should be loaded
+ with :meth:`from_pyfile` and ideally from a location not within the
+ package because the package might be installed system wide.
+
+ See :ref:`config-dev-prod` for an example of class-based configuration
+ using :meth:`from_object`.
+
+ :param obj: an import name or object
+ """
+ if isinstance(obj, string_types):
+ obj = import_string(obj)
+ for key in dir(obj):
+ if key.isupper():
+ self[key] = getattr(obj, key)
+
+ def from_json(self, filename, silent=False):
+ """Updates the values in the config from a JSON file. This function
+ behaves as if the JSON object was a dictionary and passed to the
+ :meth:`from_mapping` function.
+
+ :param filename: the filename of the JSON file. This can either be an
+ absolute filename or a filename relative to the
+ root path.
+ :param silent: set to ``True`` if you want silent failure for missing
+ files.
+
+ .. versionadded:: 0.11
+ """
+ filename = os.path.join(self.root_path, filename)
+
+ try:
+ with open(filename) as json_file:
+ obj = json.loads(json_file.read())
+ except IOError as e:
+ if silent and e.errno in (errno.ENOENT, errno.EISDIR):
+ return False
+ e.strerror = 'Unable to load configuration file (%s)' % e.strerror
+ raise
+ return self.from_mapping(obj)
+
+ def from_mapping(self, *mapping, **kwargs):
+ """Updates the config like :meth:`update` ignoring items with non-upper
+ keys.
+
+ .. versionadded:: 0.11
+ """
+ mappings = []
+ if len(mapping) == 1:
+ if hasattr(mapping[0], 'items'):
+ mappings.append(mapping[0].items())
+ else:
+ mappings.append(mapping[0])
+ elif len(mapping) > 1:
+ raise TypeError(
+ 'expected at most 1 positional argument, got %d' % len(mapping)
+ )
+ mappings.append(kwargs.items())
+ for mapping in mappings:
+ for (key, value) in mapping:
+ if key.isupper():
+ self[key] = value
+ return True
+
+ def get_namespace(self, namespace, lowercase=True, trim_namespace=True):
+ """Returns a dictionary containing a subset of configuration options
+ that match the specified namespace/prefix. Example usage::
+
+ app.config['IMAGE_STORE_TYPE'] = 'fs'
+ app.config['IMAGE_STORE_PATH'] = '/var/app/images'
+ app.config['IMAGE_STORE_BASE_URL'] = 'http://img.website.com'
+ image_store_config = app.config.get_namespace('IMAGE_STORE_')
+
+ The resulting dictionary `image_store_config` would look like::
+
+ {
+ 'type': 'fs',
+ 'path': '/var/app/images',
+ 'base_url': 'http://img.website.com'
+ }
+
+ This is often useful when configuration options map directly to
+ keyword arguments in functions or class constructors.
+
+ :param namespace: a configuration namespace
+ :param lowercase: a flag indicating if the keys of the resulting
+ dictionary should be lowercase
+ :param trim_namespace: a flag indicating if the keys of the resulting
+ dictionary should not include the namespace
+
+ .. versionadded:: 0.11
+ """
+ rv = {}
+ for k, v in iteritems(self):
+ if not k.startswith(namespace):
+ continue
+ if trim_namespace:
+ key = k[len(namespace):]
+ else:
+ key = k
+ if lowercase:
+ key = key.lower()
+ rv[key] = v
+ return rv
+
+ def __repr__(self):
+ return '<%s %s>' % (self.__class__.__name__, dict.__repr__(self))
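A short sketch (illustrative, not part of config.py) combining :meth:`from_mapping` and :meth:`get_namespace`; the keys are invented::

    from flask import Flask

    app = Flask(__name__)
    app.config.from_mapping(
        MAIL_SERVER='smtp.example.com',
        MAIL_PORT=25,
        mail_note='ignored because the key is not uppercase',
    )
    mail = app.config.get_namespace('MAIL_')
    # mail == {'server': 'smtp.example.com', 'port': 25}
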
diff --git a/appWSM/lib/python2.7/site-packages/flask/ctx.py b/appWSM/lib/python2.7/site-packages/flask/ctx.py
new file mode 100644
index 0000000000000000000000000000000000000000..480d9c5c42f7daed6a90ef51a0fa872b446e7f2e
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/flask/ctx.py
@@ -0,0 +1,410 @@
+# -*- coding: utf-8 -*-
+"""
+ flask.ctx
+ ~~~~~~~~~
+
+ Implements the objects required to keep the context.
+
+ :copyright: (c) 2015 by Armin Ronacher.
+ :license: BSD, see LICENSE for more details.
+"""
+
+import sys
+from functools import update_wrapper
+
+from werkzeug.exceptions import HTTPException
+
+from .globals import _request_ctx_stack, _app_ctx_stack
+from .signals import appcontext_pushed, appcontext_popped
+from ._compat import BROKEN_PYPY_CTXMGR_EXIT, reraise
+
+
+# a singleton sentinel value for parameter defaults
+_sentinel = object()
+
+
+class _AppCtxGlobals(object):
+ """A plain object."""
+
+ def get(self, name, default=None):
+ return self.__dict__.get(name, default)
+
+ def pop(self, name, default=_sentinel):
+ if default is _sentinel:
+ return self.__dict__.pop(name)
+ else:
+ return self.__dict__.pop(name, default)
+
+ def setdefault(self, name, default=None):
+ return self.__dict__.setdefault(name, default)
+
+ def __contains__(self, item):
+ return item in self.__dict__
+
+ def __iter__(self):
+ return iter(self.__dict__)
+
+ def __repr__(self):
+ top = _app_ctx_stack.top
+ if top is not None:
+            return '<flask.g of %r>' % top.app.name
+ return object.__repr__(self)
+
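A minimal usage sketch (not part of ctx.py) for the ``g`` object that ``_AppCtxGlobals`` backs; the attribute names are placeholders::

    from flask import Flask, g

    app = Flask(__name__)

    with app.app_context():
        g.db_handle = 'fake-connection'          # plain attribute storage
        assert g.get('db_handle') == 'fake-connection'
        assert g.get('missing', 'fallback') == 'fallback'
        g.setdefault('cache', {})['answer'] = 42
        g.pop('db_handle')
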
+
+def after_this_request(f):
+ """Executes a function after this request. This is useful to modify
+ response objects. The function is passed the response object and has
+ to return the same or a new one.
+
+ Example::
+
+ @app.route('/')
+ def index():
+ @after_this_request
+ def add_header(response):
+ response.headers['X-Foo'] = 'Parachute'
+ return response
+ return 'Hello World!'
+
+ This is more useful if a function other than the view function wants to
+ modify a response. For instance think of a decorator that wants to add
+ some headers without converting the return value into a response object.
+
+ .. versionadded:: 0.9
+ """
+ _request_ctx_stack.top._after_request_functions.append(f)
+ return f
+
+
+def copy_current_request_context(f):
+ """A helper function that decorates a function to retain the current
+ request context. This is useful when working with greenlets. The moment
+ the function is decorated a copy of the request context is created and
+ then pushed when the function is called.
+
+ Example::
+
+ import gevent
+ from flask import copy_current_request_context
+
+ @app.route('/')
+ def index():
+ @copy_current_request_context
+ def do_some_work():
+ # do some work here, it can access flask.request like you
+ # would otherwise in the view function.
+ ...
+ gevent.spawn(do_some_work)
+ return 'Regular response'
+
+ .. versionadded:: 0.10
+ """
+ top = _request_ctx_stack.top
+ if top is None:
+ raise RuntimeError('This decorator can only be used at local scopes '
+ 'when a request context is on the stack. For instance within '
+ 'view functions.')
+ reqctx = top.copy()
+ def wrapper(*args, **kwargs):
+ with reqctx:
+ return f(*args, **kwargs)
+ return update_wrapper(wrapper, f)
+
+
+def has_request_context():
+ """If you have code that wants to test if a request context is there or
+ not this function can be used. For instance, you may want to take advantage
+ of request information if the request object is available, but fail
+ silently if it is unavailable.
+
+ ::
+
+ class User(db.Model):
+
+ def __init__(self, username, remote_addr=None):
+ self.username = username
+ if remote_addr is None and has_request_context():
+ remote_addr = request.remote_addr
+ self.remote_addr = remote_addr
+
+    Alternatively you can also just test any of the context bound objects
+    (such as :class:`request` or :class:`g`) for truthiness::
+
+ class User(db.Model):
+
+ def __init__(self, username, remote_addr=None):
+ self.username = username
+ if remote_addr is None and request:
+ remote_addr = request.remote_addr
+ self.remote_addr = remote_addr
+
+ .. versionadded:: 0.7
+ """
+ return _request_ctx_stack.top is not None
+
+
+def has_app_context():
+ """Works like :func:`has_request_context` but for the application
+ context. You can also just do a boolean check on the
+ :data:`current_app` object instead.
+
+ .. versionadded:: 0.9
+ """
+ return _app_ctx_stack.top is not None
+
+
+class AppContext(object):
+ """The application context binds an application object implicitly
+ to the current thread or greenlet, similar to how the
+ :class:`RequestContext` binds request information. The application
+ context is also implicitly created if a request context is created
+ but the application is not on top of the individual application
+ context.
+ """
+
+ def __init__(self, app):
+ self.app = app
+ self.url_adapter = app.create_url_adapter(None)
+ self.g = app.app_ctx_globals_class()
+
+        # Like request contexts, app contexts can be pushed multiple times,
+        # but for them a basic "refcount" is enough to track them.
+ self._refcnt = 0
+
+ def push(self):
+ """Binds the app context to the current context."""
+ self._refcnt += 1
+ if hasattr(sys, 'exc_clear'):
+ sys.exc_clear()
+ _app_ctx_stack.push(self)
+ appcontext_pushed.send(self.app)
+
+ def pop(self, exc=_sentinel):
+ """Pops the app context."""
+ try:
+ self._refcnt -= 1
+ if self._refcnt <= 0:
+ if exc is _sentinel:
+ exc = sys.exc_info()[1]
+ self.app.do_teardown_appcontext(exc)
+ finally:
+ rv = _app_ctx_stack.pop()
+ assert rv is self, 'Popped wrong app context. (%r instead of %r)' \
+ % (rv, self)
+ appcontext_popped.send(self.app)
+
+ def __enter__(self):
+ self.push()
+ return self
+
+ def __exit__(self, exc_type, exc_value, tb):
+ self.pop(exc_value)
+
+ if BROKEN_PYPY_CTXMGR_EXIT and exc_type is not None:
+ reraise(exc_type, exc_value, tb)
+
+
+class RequestContext(object):
+ """The request context contains all request relevant information. It is
+ created at the beginning of the request and pushed to the
+ `_request_ctx_stack` and removed at the end of it. It will create the
+ URL adapter and request object for the WSGI environment provided.
+
+ Do not attempt to use this class directly, instead use
+ :meth:`~flask.Flask.test_request_context` and
+ :meth:`~flask.Flask.request_context` to create this object.
+
+ When the request context is popped, it will evaluate all the
+ functions registered on the application for teardown execution
+ (:meth:`~flask.Flask.teardown_request`).
+
+ The request context is automatically popped at the end of the request
+ for you. In debug mode the request context is kept around if
+ exceptions happen so that interactive debuggers have a chance to
+ introspect the data. With 0.4 this can also be forced for requests
+ that did not fail and outside of ``DEBUG`` mode. By setting
+ ``'flask._preserve_context'`` to ``True`` on the WSGI environment the
+ context will not pop itself at the end of the request. This is used by
+ the :meth:`~flask.Flask.test_client` for example to implement the
+ deferred cleanup functionality.
+
+ You might find this helpful for unittests where you need the
+ information from the context local around for a little longer. Make
+ sure to properly :meth:`~werkzeug.LocalStack.pop` the stack yourself in
+ that situation, otherwise your unittests will leak memory.
+ """
+
+ def __init__(self, app, environ, request=None):
+ self.app = app
+ if request is None:
+ request = app.request_class(environ)
+ self.request = request
+ self.url_adapter = app.create_url_adapter(self.request)
+ self.flashes = None
+ self.session = None
+
+ # Request contexts can be pushed multiple times and interleaved with
+ # other request contexts. Only when the last level is popped do we
+ # get rid of them. Additionally, if an application context is missing,
+ # one is created implicitly, so for each level we record this information.
+ self._implicit_app_ctx_stack = []
+
+ # indicator if the context was preserved. Next time another context
+ # is pushed the preserved context is popped.
+ self.preserved = False
+
+ # remembers the exception for pop if there is one in case the context
+ # preservation kicks in.
+ self._preserved_exc = None
+
+ # Functions that should be executed after the request on the response
+ # object. These will be called before the regular "after_request"
+ # functions.
+ self._after_request_functions = []
+
+ self.match_request()
+
+ def _get_g(self):
+ return _app_ctx_stack.top.g
+ def _set_g(self, value):
+ _app_ctx_stack.top.g = value
+ g = property(_get_g, _set_g)
+ del _get_g, _set_g
+
+ def copy(self):
+ """Creates a copy of this request context with the same request object.
+ This can be used to move a request context to a different greenlet.
+ Because the actual request object is the same this cannot be used to
+ move a request context to a different thread unless access to the
+ request object is locked.
+
+ .. versionadded:: 0.10
+ """
+ return self.__class__(self.app,
+ environ=self.request.environ,
+ request=self.request
+ )
+
+ def match_request(self):
+ """Can be overridden by a subclass to hook into the matching
+ of the request.
+ """
+ try:
+ url_rule, self.request.view_args = \
+ self.url_adapter.match(return_rule=True)
+ self.request.url_rule = url_rule
+ except HTTPException as e:
+ self.request.routing_exception = e
+
+ def push(self):
+ """Binds the request context to the current context."""
+ # If an exception occurs in debug mode or if context preservation is
+ # activated under exception situations exactly one context stays
+ # on the stack. The rationale is that you want to access that
+ # information under debug situations. However if someone forgets to
+ # pop that context again we want to make sure that on the next push
+ # it's invalidated, otherwise we run the risk that something leaks
+ # memory. This is usually only a problem in test suites since this
+ # functionality is not active in production environments.
+ top = _request_ctx_stack.top
+ if top is not None and top.preserved:
+ top.pop(top._preserved_exc)
+
+ # Before we push the request context we have to ensure that there
+ # is an application context.
+ app_ctx = _app_ctx_stack.top
+ if app_ctx is None or app_ctx.app != self.app:
+ app_ctx = self.app.app_context()
+ app_ctx.push()
+ self._implicit_app_ctx_stack.append(app_ctx)
+ else:
+ self._implicit_app_ctx_stack.append(None)
+
+ if hasattr(sys, 'exc_clear'):
+ sys.exc_clear()
+
+ _request_ctx_stack.push(self)
+
+ # Open the session at the moment that the request context is
+ # available. This allows a custom open_session method to use the
+ # request context (e.g. code that accesses database information
+ # stored on `g` instead of the appcontext).
+ self.session = self.app.open_session(self.request)
+ if self.session is None:
+ self.session = self.app.make_null_session()
+
+ def pop(self, exc=_sentinel):
+ """Pops the request context and unbinds it by doing that. This will
+ also trigger the execution of functions registered by the
+ :meth:`~flask.Flask.teardown_request` decorator.
+
+ .. versionchanged:: 0.9
+ Added the `exc` argument.
+ """
+ app_ctx = self._implicit_app_ctx_stack.pop()
+
+ try:
+ clear_request = False
+ if not self._implicit_app_ctx_stack:
+ self.preserved = False
+ self._preserved_exc = None
+ if exc is _sentinel:
+ exc = sys.exc_info()[1]
+ self.app.do_teardown_request(exc)
+
+ # If this interpreter supports clearing the exception information
+ # we do that now. This will only go into effect on Python 2.x,
+ # on 3.x it disappears automatically at the end of the exception
+ # stack.
+ if hasattr(sys, 'exc_clear'):
+ sys.exc_clear()
+
+ request_close = getattr(self.request, 'close', None)
+ if request_close is not None:
+ request_close()
+ clear_request = True
+ finally:
+ rv = _request_ctx_stack.pop()
+
+ # get rid of circular dependencies at the end of the request
+ # so that we don't require the GC to be active.
+ if clear_request:
+ rv.request.environ['werkzeug.request'] = None
+
+ # Get rid of the app as well if necessary.
+ if app_ctx is not None:
+ app_ctx.pop(exc)
+
+ assert rv is self, 'Popped wrong request context. ' \
+ '(%r instead of %r)' % (rv, self)
+
+ def auto_pop(self, exc):
+ if self.request.environ.get('flask._preserve_context') or \
+ (exc is not None and self.app.preserve_context_on_exception):
+ self.preserved = True
+ self._preserved_exc = exc
+ else:
+ self.pop(exc)
+
+ def __enter__(self):
+ self.push()
+ return self
+
+ def __exit__(self, exc_type, exc_value, tb):
+ # do not pop the request stack if we are in debug mode and an
+ # exception happened. This will allow the debugger to still
+ # access the request object in the interactive shell. Furthermore
+ # the context can be force kept alive for the test client.
+ # See flask.testing for how this works.
+ self.auto_pop(exc_value)
+
+ if BROKEN_PYPY_CTXMGR_EXIT and exc_type is not None:
+ reraise(exc_type, exc_value, tb)
+
+ def __repr__(self):
+ return '<%s \'%s\' [%s] of %s>' % (
+ self.__class__.__name__,
+ self.request.url,
+ self.request.method,
+ self.app.name,
+ )
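The context machinery defined in ``flask/ctx.py`` above is easiest to see from the calling side. The following is a minimal sketch (the app, route and values are illustrative assumptions, not part of this change) showing how ``AppContext`` and ``RequestContext`` are normally pushed through ``app.app_context()`` and ``app.test_request_context()``::

    from flask import Flask, current_app, g, request

    app = Flask(__name__)

    # Application context: makes current_app and g usable without a request.
    with app.app_context():
        g.answer = 42
        print(current_app.name, g.answer)

    # Request context: implicitly pushes an app context as well (see
    # RequestContext.push above), then binds the request and session proxies.
    with app.test_request_context('/hello?name=World'):
        print(request.path, request.args.get('name'))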
diff --git a/appWSM/lib/python2.7/site-packages/flask/debughelpers.py b/appWSM/lib/python2.7/site-packages/flask/debughelpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..90710dd353dff2f975cd7deabeafe19c4a772f42
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/flask/debughelpers.py
@@ -0,0 +1,155 @@
+# -*- coding: utf-8 -*-
+"""
+ flask.debughelpers
+ ~~~~~~~~~~~~~~~~~~
+
+ Various helpers to make the development experience better.
+
+ :copyright: (c) 2015 by Armin Ronacher.
+ :license: BSD, see LICENSE for more details.
+"""
+from ._compat import implements_to_string, text_type
+from .app import Flask
+from .blueprints import Blueprint
+from .globals import _request_ctx_stack
+
+
+class UnexpectedUnicodeError(AssertionError, UnicodeError):
+ """Raised in places where we want some better error reporting for
+ unexpected unicode or binary data.
+ """
+
+
+@implements_to_string
+class DebugFilesKeyError(KeyError, AssertionError):
+ """Raised from request.files during debugging. The idea is that it can
+ provide a better error message than just a generic KeyError/BadRequest.
+ """
+
+ def __init__(self, request, key):
+ form_matches = request.form.getlist(key)
+ buf = ['You tried to access the file "%s" in the request.files '
+ 'dictionary but it does not exist. The mimetype for the request '
+ 'is "%s" instead of "multipart/form-data" which means that no '
+ 'file contents were transmitted. To fix this error you should '
+ 'provide enctype="multipart/form-data" in your form.' %
+ (key, request.mimetype)]
+ if form_matches:
+ buf.append('\n\nThe browser instead transmitted some file names. '
+ 'This was submitted: %s' % ', '.join('"%s"' % x
+ for x in form_matches))
+ self.msg = ''.join(buf)
+
+ def __str__(self):
+ return self.msg
+
+
+class FormDataRoutingRedirect(AssertionError):
+ """This exception is raised by Flask in debug mode if it detects a
+ redirect caused by the routing system when the request method is not
+ GET, HEAD or OPTIONS. Reasoning: form data will be dropped.
+ """
+
+ def __init__(self, request):
+ exc = request.routing_exception
+ buf = ['A request was sent to this URL (%s) but a redirect was '
+ 'issued automatically by the routing system to "%s".'
+ % (request.url, exc.new_url)]
+
+ # In case just a slash was appended we can be extra helpful
+ if request.base_url + '/' == exc.new_url.split('?')[0]:
+ buf.append(' The URL was defined with a trailing slash so '
+ 'Flask will automatically redirect to the URL '
+ 'with the trailing slash if it was accessed '
+ 'without one.')
+
+ buf.append(' Make sure to directly send your %s-request to this URL '
+ 'since we can\'t make browsers or HTTP clients redirect '
+ 'with form data reliably or without user interaction.' %
+ request.method)
+ buf.append('\n\nNote: this exception is only raised in debug mode')
+ AssertionError.__init__(self, ''.join(buf).encode('utf-8'))
+
+
+def attach_enctype_error_multidict(request):
+ """Since Flask 0.8 we're monkeypatching the files object in case a
+ request is detected that does not use multipart form data but the files
+ object is accessed.
+ """
+ oldcls = request.files.__class__
+ class newcls(oldcls):
+ def __getitem__(self, key):
+ try:
+ return oldcls.__getitem__(self, key)
+ except KeyError:
+ if key not in request.form:
+ raise
+ raise DebugFilesKeyError(request, key)
+ newcls.__name__ = oldcls.__name__
+ newcls.__module__ = oldcls.__module__
+ request.files.__class__ = newcls
+
+
+def _dump_loader_info(loader):
+ yield 'class: %s.%s' % (type(loader).__module__, type(loader).__name__)
+ for key, value in sorted(loader.__dict__.items()):
+ if key.startswith('_'):
+ continue
+ if isinstance(value, (tuple, list)):
+ if not all(isinstance(x, (str, text_type)) for x in value):
+ continue
+ yield '%s:' % key
+ for item in value:
+ yield ' - %s' % item
+ continue
+ elif not isinstance(value, (str, text_type, int, float, bool)):
+ continue
+ yield '%s: %r' % (key, value)
+
+
+def explain_template_loading_attempts(app, template, attempts):
+ """This should help developers understand what failed"""
+ info = ['Locating template "%s":' % template]
+ total_found = 0
+ blueprint = None
+ reqctx = _request_ctx_stack.top
+ if reqctx is not None and reqctx.request.blueprint is not None:
+ blueprint = reqctx.request.blueprint
+
+ for idx, (loader, srcobj, triple) in enumerate(attempts):
+ if isinstance(srcobj, Flask):
+ src_info = 'application "%s"' % srcobj.import_name
+ elif isinstance(srcobj, Blueprint):
+ src_info = 'blueprint "%s" (%s)' % (srcobj.name,
+ srcobj.import_name)
+ else:
+ src_info = repr(srcobj)
+
+ info.append('% 5d: trying loader of %s' % (
+ idx + 1, src_info))
+
+ for line in _dump_loader_info(loader):
+ info.append(' %s' % line)
+
+ if triple is None:
+ detail = 'no match'
+ else:
+ detail = 'found (%r)' % (triple[1] or '')
+ total_found += 1
+ info.append(' -> %s' % detail)
+
+ seems_fishy = False
+ if total_found == 0:
+ info.append('Error: the template could not be found.')
+ seems_fishy = True
+ elif total_found > 1:
+ info.append('Warning: multiple loaders returned a match for the template.')
+ seems_fishy = True
+
+ if blueprint is not None and seems_fishy:
+ info.append(' The template was looked up from an endpoint that '
+ 'belongs to the blueprint "%s".' % blueprint)
+ info.append(' Maybe you did not place a template in the right folder?')
+ info.append(' See http://flask.pocoo.org/docs/blueprints/#templates')
+
+ app.logger.info('\n'.join(info))
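These helpers are wired into template lookup through the ``EXPLAIN_TEMPLATE_LOADING`` config flag; a minimal sketch of turning the report on (the route and the deliberately missing template name are assumptions for illustration)::

    from flask import Flask, render_template

    app = Flask(__name__)
    # With this flag set, Flask logs the per-loader report built by
    # explain_template_loading_attempts() whenever a template is located.
    app.config['EXPLAIN_TEMPLATE_LOADING'] = True

    @app.route('/')
    def index():
        # 'missing.html' does not exist, so the logged report ends with
        # 'Error: the template could not be found.' before the exception.
        return render_template('missing.html')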
diff --git a/appWSM/lib/python2.7/site-packages/flask/ext/__init__.py b/appWSM/lib/python2.7/site-packages/flask/ext/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..051f44ac4051aa73ef23be0bf8a8873dec5aa4f6
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/flask/ext/__init__.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+"""
+ flask.ext
+ ~~~~~~~~~
+
+ Redirect imports for extensions. This module basically makes it possible
+ for us to transition from flaskext.foo to flask_foo without having to
+ force all extensions to upgrade at the same time.
+
+ When a user does ``from flask.ext.foo import bar`` it will attempt to
+ import ``from flask_foo import bar`` first and when that fails it will
+ try to import ``from flaskext.foo import bar``.
+
+ We're switching from namespace packages because it was just too painful for
+ everybody involved.
+
+ :copyright: (c) 2015 by Armin Ronacher.
+ :license: BSD, see LICENSE for more details.
+"""
+
+
+def setup():
+ from ..exthook import ExtensionImporter
+ importer = ExtensionImporter(['flask_%s', 'flaskext.%s'], __name__)
+ importer.install()
+
+
+setup()
+del setup
diff --git a/appWSM/lib/python2.7/site-packages/flask/exthook.py b/appWSM/lib/python2.7/site-packages/flask/exthook.py
new file mode 100644
index 0000000000000000000000000000000000000000..d88428027a6839b771e1ad9c7c8486573ec5c1d7
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/flask/exthook.py
@@ -0,0 +1,143 @@
+# -*- coding: utf-8 -*-
+"""
+ flask.exthook
+ ~~~~~~~~~~~~~
+
+ Redirect imports for extensions. This module basically makes it possible
+ for us to transition from flaskext.foo to flask_foo without having to
+ force all extensions to upgrade at the same time.
+
+ When a user does ``from flask.ext.foo import bar`` it will attempt to
+ import ``from flask_foo import bar`` first and when that fails it will
+ try to import ``from flaskext.foo import bar``.
+
+ We're switching from namespace packages because it was just too painful for
+ everybody involved.
+
+ This is used by `flask.ext`.
+
+ :copyright: (c) 2015 by Armin Ronacher.
+ :license: BSD, see LICENSE for more details.
+"""
+import sys
+import os
+import warnings
+from ._compat import reraise
+
+
+class ExtDeprecationWarning(DeprecationWarning):
+ pass
+
+warnings.simplefilter('always', ExtDeprecationWarning)
+
+
+class ExtensionImporter(object):
+ """This importer redirects imports from this submodule to other locations.
+ This makes it possible to transition from the old flaskext.name to the
+ newer flask_name without people having a hard time.
+ """
+
+ def __init__(self, module_choices, wrapper_module):
+ self.module_choices = module_choices
+ self.wrapper_module = wrapper_module
+ self.prefix = wrapper_module + '.'
+ self.prefix_cutoff = wrapper_module.count('.') + 1
+
+ def __eq__(self, other):
+ return self.__class__.__module__ == other.__class__.__module__ and \
+ self.__class__.__name__ == other.__class__.__name__ and \
+ self.wrapper_module == other.wrapper_module and \
+ self.module_choices == other.module_choices
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def install(self):
+ sys.meta_path[:] = [x for x in sys.meta_path if self != x] + [self]
+
+ def find_module(self, fullname, path=None):
+ if fullname.startswith(self.prefix) and \
+ fullname != 'flask.ext.ExtDeprecationWarning':
+ return self
+
+ def load_module(self, fullname):
+ if fullname in sys.modules:
+ return sys.modules[fullname]
+
+ modname = fullname.split('.', self.prefix_cutoff)[self.prefix_cutoff]
+
+ warnings.warn(
+ "Importing flask.ext.{x} is deprecated, use flask_{x} instead."
+ .format(x=modname), ExtDeprecationWarning, stacklevel=2
+ )
+
+ for path in self.module_choices:
+ realname = path % modname
+ try:
+ __import__(realname)
+ except ImportError:
+ exc_type, exc_value, tb = sys.exc_info()
+ # since we only establish the entry in sys.modules at the
+ # very end this seems to be redundant, but if recursive imports
+ # happen we will call into the same import a second time.
+ # On the second invocation we still don't have an entry for
+ # fullname in sys.modules, but we will end up with the same
+ # fake module name and that import will succeed since this
+ # one already has a temporary entry in the modules dict.
+ # Since this one "succeeded" temporarily that second
+ # invocation now will have created a fullname entry in
+ # sys.modules which we have to kill.
+ sys.modules.pop(fullname, None)
+
+ # If it's an important traceback we reraise it, otherwise
+ # we swallow it and try the next choice. The skipped frame
+ # is the one from __import__ above which we don't care about
+ if self.is_important_traceback(realname, tb):
+ reraise(exc_type, exc_value, tb.tb_next)
+ continue
+ module = sys.modules[fullname] = sys.modules[realname]
+ if '.' not in modname:
+ setattr(sys.modules[self.wrapper_module], modname, module)
+
+ if realname.startswith('flaskext.'):
+ warnings.warn(
+ "Detected extension named flaskext.{x}, please rename it "
+ "to flask_{x}. The old form is deprecated."
+ .format(x=modname), ExtDeprecationWarning
+ )
+
+ return module
+ raise ImportError('No module named %s' % fullname)
+
+ def is_important_traceback(self, important_module, tb):
+ """Walks a traceback's frames and checks if any of the frames
+ originated in the given important module. If that is the case then we
+ were able to import the module itself but apparently something went
+ wrong when the module was imported. (E.g. an import inside the module failed.)
+ """
+ while tb is not None:
+ if self.is_important_frame(important_module, tb):
+ return True
+ tb = tb.tb_next
+ return False
+
+ def is_important_frame(self, important_module, tb):
+ """Checks a single frame if it's important."""
+ g = tb.tb_frame.f_globals
+ if '__name__' not in g:
+ return False
+
+ module_name = g['__name__']
+
+ # Python 2.7 Behavior. Modules are cleaned up late so the
+ # name shows up properly here. Success!
+ if module_name == important_module:
+ return True
+
+ # Some python versions will clean up modules so early that the
+ # module name at that point is no longer set. Try guessing from
+ # the filename then.
+ filename = os.path.abspath(tb.tb_frame.f_code.co_filename)
+ test_string = os.path.sep + important_module.replace('.', os.path.sep)
+ return test_string + '.py' in filename or \
+ test_string + os.path.sep + '__init__.py' in filename
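The redirect behaviour is easiest to verify interactively; a minimal sketch, assuming some extension such as ``flask_sqlalchemy`` is installed (the package name is only a stand-in)::

    import warnings

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        # ExtensionImporter rewrites this to "import flask_sqlalchemy" and
        # emits an ExtDeprecationWarning for the legacy flask.ext.* name.
        from flask.ext import sqlalchemy

    print(sqlalchemy.__name__)            # 'flask_sqlalchemy'
    print(caught[0].category.__name__)    # 'ExtDeprecationWarning'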
diff --git a/appWSM/lib/python2.7/site-packages/flask/globals.py b/appWSM/lib/python2.7/site-packages/flask/globals.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b70a3ef925bee137b93933517c1aa0e268bbbfd
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/flask/globals.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+"""
+ flask.globals
+ ~~~~~~~~~~~~~
+
+ Defines all the global objects that are proxies to the current
+ active context.
+
+ :copyright: (c) 2015 by Armin Ronacher.
+ :license: BSD, see LICENSE for more details.
+"""
+
+from functools import partial
+from werkzeug.local import LocalStack, LocalProxy
+
+
+_request_ctx_err_msg = '''\
+Working outside of request context.
+
+This typically means that you attempted to use functionality that needed
+an active HTTP request. Consult the documentation on testing for
+information about how to avoid this problem.\
+'''
+_app_ctx_err_msg = '''\
+Working outside of application context.
+
+This typically means that you attempted to use functionality that needed
+ to interface with the current application object in some way. To solve
+this set up an application context with app.app_context(). See the
+documentation for more information.\
+'''
+
+
+def _lookup_req_object(name):
+ top = _request_ctx_stack.top
+ if top is None:
+ raise RuntimeError(_request_ctx_err_msg)
+ return getattr(top, name)
+
+
+def _lookup_app_object(name):
+ top = _app_ctx_stack.top
+ if top is None:
+ raise RuntimeError(_app_ctx_err_msg)
+ return getattr(top, name)
+
+
+def _find_app():
+ top = _app_ctx_stack.top
+ if top is None:
+ raise RuntimeError(_app_ctx_err_msg)
+ return top.app
+
+
+# context locals
+_request_ctx_stack = LocalStack()
+_app_ctx_stack = LocalStack()
+current_app = LocalProxy(_find_app)
+request = LocalProxy(partial(_lookup_req_object, 'request'))
+session = LocalProxy(partial(_lookup_req_object, 'session'))
+g = LocalProxy(partial(_lookup_app_object, 'g'))
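Each proxy defined here only resolves while a matching context is on its stack; a minimal sketch of both the failure and the success path (the app itself is an illustrative assumption)::

    from flask import Flask, current_app, g

    app = Flask(__name__)

    try:
        current_app.name            # no application context pushed yet
    except RuntimeError as exc:
        print(exc)                  # "Working outside of application context. ..."

    with app.app_context():
        g.user = 'alice'            # stored on the AppContext's g object
        print(current_app.name, g.user)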
diff --git a/appWSM/lib/python2.7/site-packages/flask/helpers.py b/appWSM/lib/python2.7/site-packages/flask/helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..4bb1d1c9157950f188fe8a1c71e79e72e969a16c
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/flask/helpers.py
@@ -0,0 +1,966 @@
+# -*- coding: utf-8 -*-
+"""
+ flask.helpers
+ ~~~~~~~~~~~~~
+
+ Implements various helpers.
+
+ :copyright: (c) 2015 by Armin Ronacher.
+ :license: BSD, see LICENSE for more details.
+"""
+
+import os
+import sys
+import pkgutil
+import posixpath
+import mimetypes
+from time import time
+from zlib import adler32
+from threading import RLock
+from werkzeug.routing import BuildError
+from functools import update_wrapper
+
+try:
+ from werkzeug.urls import url_quote
+except ImportError:
+ from urlparse import quote as url_quote
+
+from werkzeug.datastructures import Headers, Range
+from werkzeug.exceptions import BadRequest, NotFound, \
+ RequestedRangeNotSatisfiable
+
+# this was moved in 0.7
+try:
+ from werkzeug.wsgi import wrap_file
+except ImportError:
+ from werkzeug.utils import wrap_file
+
+from jinja2 import FileSystemLoader
+
+from .signals import message_flashed
+from .globals import session, _request_ctx_stack, _app_ctx_stack, \
+ current_app, request
+from ._compat import string_types, text_type
+
+
+# sentinel
+_missing = object()
+
+
+# what separators does this operating system provide that are not a slash?
+# this is used by the send_from_directory function to ensure that nobody is
+# able to access files from outside the filesystem.
+_os_alt_seps = list(sep for sep in [os.path.sep, os.path.altsep]
+ if sep not in (None, '/'))
+
+
+def get_debug_flag(default=None):
+ val = os.environ.get('FLASK_DEBUG')
+ if not val:
+ return default
+ return val not in ('0', 'false', 'no')
+
+
+def _endpoint_from_view_func(view_func):
+ """Internal helper that returns the default endpoint for a given
+ function. This always is the function name.
+ """
+ assert view_func is not None, 'expected view func if endpoint ' \
+ 'is not provided.'
+ return view_func.__name__
+
+
+def stream_with_context(generator_or_function):
+ """Request contexts disappear when the response is started on the server.
+ This is done for efficiency reasons and to make it less likely to encounter
+ memory leaks with badly written WSGI middlewares. The downside is that if
+ you are using streamed responses, the generator cannot access request bound
+ information any more.
+
+ This function however can help you keep the context around for longer::
+
+ from flask import stream_with_context, request, Response
+
+ @app.route('/stream')
+ def streamed_response():
+ @stream_with_context
+ def generate():
+ yield 'Hello '
+ yield request.args['name']
+ yield '!'
+ return Response(generate())
+
+ Alternatively it can also be used around a specific generator::
+
+ from flask import stream_with_context, request, Response
+
+ @app.route('/stream')
+ def streamed_response():
+ def generate():
+ yield 'Hello '
+ yield request.args['name']
+ yield '!'
+ return Response(stream_with_context(generate()))
+
+ .. versionadded:: 0.9
+ """
+ try:
+ gen = iter(generator_or_function)
+ except TypeError:
+ def decorator(*args, **kwargs):
+ gen = generator_or_function(*args, **kwargs)
+ return stream_with_context(gen)
+ return update_wrapper(decorator, generator_or_function)
+
+ def generator():
+ ctx = _request_ctx_stack.top
+ if ctx is None:
+ raise RuntimeError('Attempted to stream with context but '
+ 'there was no context in the first place to keep around.')
+ with ctx:
+ # Dummy sentinel. Has to be inside the context block or we're
+ # not actually keeping the context around.
+ yield None
+
+ # The try/finally is here so that if someone passes a WSGI level
+ # iterator in we're still running the cleanup logic. Generators
+ # don't need that because they are closed on their destruction
+ # automatically.
+ try:
+ for item in gen:
+ yield item
+ finally:
+ if hasattr(gen, 'close'):
+ gen.close()
+
+ # The trick is to start the generator. Then the code execution runs until
+ # the first dummy None is yielded at which point the context was already
+ # pushed. This item is discarded. Then when the iteration continues the
+ # real generator is executed.
+ wrapped_g = generator()
+ next(wrapped_g)
+ return wrapped_g
+
+
+def make_response(*args):
+ """Sometimes it is necessary to set additional headers in a view. Because
+ views do not have to return response objects but can return a value that
+ is converted into a response object by Flask itself, it becomes tricky to
+ add headers to it. This function can be called instead of using a return
+ and you will get a response object which you can use to attach headers.
+
+ If the view looked like this and you want to add a new header::
+
+ def index():
+ return render_template('index.html', foo=42)
+
+ You can now do something like this::
+
+ def index():
+ response = make_response(render_template('index.html', foo=42))
+ response.headers['X-Parachutes'] = 'parachutes are cool'
+ return response
+
+ This function accepts the very same arguments you can return from a
+ view function. This for example creates a response with a 404 error
+ code::
+
+ response = make_response(render_template('not_found.html'), 404)
+
+ The other use case of this function is to force the return value of a
+ view function into a response which is helpful with view
+ decorators::
+
+ response = make_response(view_function())
+ response.headers['X-Parachutes'] = 'parachutes are cool'
+
+ Internally this function does the following things:
+
+ - if no arguments are passed, it creates a new response object
+ - if one argument is passed, :meth:`flask.Flask.make_response`
+ is invoked with it.
+ - if more than one argument is passed, the arguments are passed
+ to the :meth:`flask.Flask.make_response` function as a tuple.
+
+ .. versionadded:: 0.6
+ """
+ if not args:
+ return current_app.response_class()
+ if len(args) == 1:
+ args = args[0]
+ return current_app.make_response(args)
+
+
+def url_for(endpoint, **values):
+ """Generates a URL to the given endpoint with the method provided.
+
+ Variable arguments that are unknown to the target endpoint are appended
+ to the generated URL as query arguments. If the value of a query argument
+ is ``None``, the whole pair is skipped. In case blueprints are active
+ you can shortcut references to the same blueprint by prefixing the
+ local endpoint with a dot (``.``).
+
+ This will reference the index function local to the current blueprint::
+
+ url_for('.index')
+
+ For more information, head over to the :ref:`Quickstart <url-building>`.
+
+ To integrate applications, :class:`Flask` has a hook to intercept URL build
+ errors through :attr:`Flask.url_build_error_handlers`. The `url_for`
+ function results in a :exc:`~werkzeug.routing.BuildError` when the current
+ app does not have a URL for the given endpoint and values. When it does, the
+ :data:`~flask.current_app` calls its :attr:`~Flask.url_build_error_handlers` if
+ it is not ``None``, which can return a string to use as the result of
+ `url_for` (instead of `url_for`'s default to raise the
+ :exc:`~werkzeug.routing.BuildError` exception) or re-raise the exception.
+ An example::
+
+ def external_url_handler(error, endpoint, values):
+ "Looks up an external URL when `url_for` cannot build a URL."
+ # This is an example of hooking the build_error_handler.
+ # Here, lookup_url is some utility function you've built
+ # which looks up the endpoint in some external URL registry.
+ url = lookup_url(endpoint, **values)
+ if url is None:
+ # External lookup did not have a URL.
+ # Re-raise the BuildError, in context of original traceback.
+ exc_type, exc_value, tb = sys.exc_info()
+ if exc_value is error:
+ raise exc_type, exc_value, tb
+ else:
+ raise error
+ # url_for will use this result, instead of raising BuildError.
+ return url
+
+ app.url_build_error_handlers.append(external_url_handler)
+
+ Here, `error` is the instance of :exc:`~werkzeug.routing.BuildError`, and
+ `endpoint` and `values` are the arguments passed into `url_for`. Note
+ that this is for building URLs outside the current application, and not for
+ handling 404 NotFound errors.
+
+ .. versionadded:: 0.10
+ The `_scheme` parameter was added.
+
+ .. versionadded:: 0.9
+ The `_anchor` and `_method` parameters were added.
+
+ .. versionadded:: 0.9
+ Calls :meth:`Flask.handle_build_error` on
+ :exc:`~werkzeug.routing.BuildError`.
+
+ :param endpoint: the endpoint of the URL (name of the function)
+ :param values: the variable arguments of the URL rule
+ :param _external: if set to ``True``, an absolute URL is generated. Server
+ address can be changed via ``SERVER_NAME`` configuration variable which
+ defaults to `localhost`.
+ :param _scheme: a string specifying the desired URL scheme. The `_external`
+ parameter must be set to ``True`` or a :exc:`ValueError` is raised. The default
+ behavior uses the same scheme as the current request, or
+ ``PREFERRED_URL_SCHEME`` from the :ref:`app configuration <config>` if no
+ request context is available. As of Werkzeug 0.10, this also can be set
+ to an empty string to build protocol-relative URLs.
+ :param _anchor: if provided this is added as anchor to the URL.
+ :param _method: if provided this explicitly specifies an HTTP method.
+ """
+ appctx = _app_ctx_stack.top
+ reqctx = _request_ctx_stack.top
+ if appctx is None:
+ raise RuntimeError('Attempted to generate a URL without the '
+ 'application context being pushed. This has to be '
+ 'executed when application context is available.')
+
+ # If request specific information is available we have some extra
+ # features that support "relative" URLs.
+ if reqctx is not None:
+ url_adapter = reqctx.url_adapter
+ blueprint_name = request.blueprint
+ if not reqctx.request._is_old_module:
+ if endpoint[:1] == '.':
+ if blueprint_name is not None:
+ endpoint = blueprint_name + endpoint
+ else:
+ endpoint = endpoint[1:]
+ else:
+ # TODO: get rid of this deprecated functionality in 1.0
+ if '.' not in endpoint:
+ if blueprint_name is not None:
+ endpoint = blueprint_name + '.' + endpoint
+ elif endpoint.startswith('.'):
+ endpoint = endpoint[1:]
+ external = values.pop('_external', False)
+
+ # Otherwise go with the url adapter from the appctx and make
+ # the URLs external by default.
+ else:
+ url_adapter = appctx.url_adapter
+ if url_adapter is None:
+ raise RuntimeError('Application was not able to create a URL '
+ 'adapter for request independent URL generation. '
+ 'You might be able to fix this by setting '
+ 'the SERVER_NAME config variable.')
+ external = values.pop('_external', True)
+
+ anchor = values.pop('_anchor', None)
+ method = values.pop('_method', None)
+ scheme = values.pop('_scheme', None)
+ appctx.app.inject_url_defaults(endpoint, values)
+
+ # This is not the best way to deal with this but currently the
+ # underlying Werkzeug router does not support overriding the scheme on
+ # a per build call basis.
+ old_scheme = None
+ if scheme is not None:
+ if not external:
+ raise ValueError('When specifying _scheme, _external must be True')
+ old_scheme = url_adapter.url_scheme
+ url_adapter.url_scheme = scheme
+
+ try:
+ try:
+ rv = url_adapter.build(endpoint, values, method=method,
+ force_external=external)
+ finally:
+ if old_scheme is not None:
+ url_adapter.url_scheme = old_scheme
+ except BuildError as error:
+ # We need to inject the values again so that the app callback can
+ # deal with that sort of stuff.
+ values['_external'] = external
+ values['_anchor'] = anchor
+ values['_method'] = method
+ return appctx.app.handle_url_build_error(error, endpoint, values)
+
+ if anchor is not None:
+ rv += '#' + url_quote(anchor)
+ return rv
+
+
+def get_template_attribute(template_name, attribute):
+ """Loads a macro (or variable) a template exports. This can be used to
+ invoke a macro from within Python code. If you for example have a
+ template named :file:`_cider.html` with the following contents:
+
+ .. sourcecode:: html+jinja
+
+ {% macro hello(name) %}Hello {{ name }}!{% endmacro %}
+
+ You can access this from Python code like this::
+
+ hello = get_template_attribute('_cider.html', 'hello')
+ return hello('World')
+
+ .. versionadded:: 0.2
+
+ :param template_name: the name of the template
+ :param attribute: the name of the variable or macro to access
+ """
+ return getattr(current_app.jinja_env.get_template(template_name).module,
+ attribute)
+
+
+def flash(message, category='message'):
+ """Flashes a message to the next request. In order to remove the
+ flashed message from the session and to display it to the user,
+ the template has to call :func:`get_flashed_messages`.
+
+ .. versionchanged:: 0.3
+ `category` parameter added.
+
+ :param message: the message to be flashed.
+ :param category: the category for the message. The following values
+ are recommended: ``'message'`` for any kind of message,
+ ``'error'`` for errors, ``'info'`` for information
+ messages and ``'warning'`` for warnings. However any
+ kind of string can be used as category.
+ """
+ # Original implementation:
+ #
+ # session.setdefault('_flashes', []).append((category, message))
+ #
+ # This assumed that changes made to mutable structures in the session are
+ # always in sync with the session object, which is not true for session
+ # implementations that use external storage for keeping their keys/values.
+ flashes = session.get('_flashes', [])
+ flashes.append((category, message))
+ session['_flashes'] = flashes
+ message_flashed.send(current_app._get_current_object(),
+ message=message, category=category)
+
+
+def get_flashed_messages(with_categories=False, category_filter=[]):
+ """Pulls all flashed messages from the session and returns them.
+ Further calls in the same request to the function will return
+ the same messages. By default just the messages are returned,
+ but when `with_categories` is set to ``True``, the return value will
+ be a list of tuples in the form ``(category, message)`` instead.
+
+ Filter the flashed messages to one or more categories by providing those
+ categories in `category_filter`. This allows rendering categories in
+ separate html blocks. The `with_categories` and `category_filter`
+ arguments are distinct:
+
+ * `with_categories` controls whether categories are returned with message
+ text (``True`` gives a tuple, where ``False`` gives just the message text).
+ * `category_filter` filters the messages down to only those matching the
+ provided categories.
+
+ See :ref:`message-flashing-pattern` for examples.
+
+ .. versionchanged:: 0.3
+ `with_categories` parameter added.
+
+ .. versionchanged:: 0.9
+ `category_filter` parameter added.
+
+ :param with_categories: set to ``True`` to also receive categories.
+ :param category_filter: whitelist of categories to limit return values
+ """
+ flashes = _request_ctx_stack.top.flashes
+ if flashes is None:
+ _request_ctx_stack.top.flashes = flashes = session.pop('_flashes') \
+ if '_flashes' in session else []
+ if category_filter:
+ flashes = list(filter(lambda f: f[0] in category_filter, flashes))
+ if not with_categories:
+ return [x[1] for x in flashes]
+ return flashes
+
+
+def send_file(filename_or_fp, mimetype=None, as_attachment=False,
+ attachment_filename=None, add_etags=True,
+ cache_timeout=None, conditional=False, last_modified=None):
+ """Sends the contents of a file to the client. This will use the
+ most efficient method available and configured. By default it will
+ try to use the WSGI server's file_wrapper support. Alternatively
+ you can set the application's :attr:`~Flask.use_x_sendfile` attribute
+ to ``True`` to directly emit an ``X-Sendfile`` header. This however
+ requires support of the underlying webserver for ``X-Sendfile``.
+
+ By default it will try to guess the mimetype for you, but you can
+ also explicitly provide one. For extra security you probably want
+ to send certain files as attachment (HTML for instance). The mimetype
+ guessing requires a `filename` or an `attachment_filename` to be
+ provided.
+
+ ETags will also be attached automatically if a `filename` is provided. You
+ can turn this off by setting `add_etags=False`.
+
+ If `conditional=True` and `filename` is provided, this method will try to
+ upgrade the response stream to support range requests. This will allow
+ the request to be answered with partial content response.
+
+ Please never pass filenames to this function from user sources;
+ you should use :func:`send_from_directory` instead.
+
+ .. versionadded:: 0.2
+
+ .. versionadded:: 0.5
+ The `add_etags`, `cache_timeout` and `conditional` parameters were
+ added. The default behavior is now to attach etags.
+
+ .. versionchanged:: 0.7
+ mimetype guessing and etag support for file objects was
+ deprecated because it was unreliable. Pass a filename if you are
+ able to, otherwise attach an etag yourself. This functionality
+ will be removed in Flask 1.0
+
+ .. versionchanged:: 0.9
+ cache_timeout pulls its default from application config, when None.
+
+ .. versionchanged:: 0.12
+ The filename is no longer automatically inferred from file objects. If
+ you want to use automatic mimetype and etag support, pass a filepath via
+ `filename_or_fp` or `attachment_filename`.
+
+ .. versionchanged:: 0.12
+ The `attachment_filename` is preferred over `filename` for MIME-type
+ detection.
+
+ :param filename_or_fp: the filename of the file to send in `latin-1`.
+ This is relative to the :attr:`~Flask.root_path`
+ if a relative path is specified.
+ Alternatively a file object might be provided in
+ which case ``X-Sendfile`` might not work and fall
+ back to the traditional method. Make sure that the
+ file pointer is positioned at the start of data to
+ send before calling :func:`send_file`.
+ :param mimetype: the mimetype of the file if provided. If a file path is
+ given, auto detection happens as fallback, otherwise an
+ error will be raised.
+ :param as_attachment: set to ``True`` if you want to send this file with
+ a ``Content-Disposition: attachment`` header.
+ :param attachment_filename: the filename for the attachment if it
+ differs from the file's filename.
+ :param add_etags: set to ``False`` to disable attaching of etags.
+ :param conditional: set to ``True`` to enable conditional responses.
+
+ :param cache_timeout: the timeout in seconds for the headers. When ``None``
+ (default), this value is set by
+ :meth:`~Flask.get_send_file_max_age` of
+ :data:`~flask.current_app`.
+ :param last_modified: set the ``Last-Modified`` header to this value,
+ a :class:`~datetime.datetime` or timestamp.
+ If a file was passed, this overrides its mtime.
+ """
+ mtime = None
+ fsize = None
+ if isinstance(filename_or_fp, string_types):
+ filename = filename_or_fp
+ if not os.path.isabs(filename):
+ filename = os.path.join(current_app.root_path, filename)
+ file = None
+ if attachment_filename is None:
+ attachment_filename = os.path.basename(filename)
+ else:
+ file = filename_or_fp
+ filename = None
+
+ if mimetype is None:
+ if attachment_filename is not None:
+ mimetype = mimetypes.guess_type(attachment_filename)[0] \
+ or 'application/octet-stream'
+
+ if mimetype is None:
+ raise ValueError(
+ 'Unable to infer MIME-type because no filename is available. '
+ 'Please set either `attachment_filename`, pass a filepath to '
+ '`filename_or_fp` or set your own MIME-type via `mimetype`.'
+ )
+
+ headers = Headers()
+ if as_attachment:
+ if attachment_filename is None:
+ raise TypeError('filename unavailable, required for '
+ 'sending as attachment')
+ headers.add('Content-Disposition', 'attachment',
+ filename=attachment_filename)
+
+ if current_app.use_x_sendfile and filename:
+ if file is not None:
+ file.close()
+ headers['X-Sendfile'] = filename
+ fsize = os.path.getsize(filename)
+ headers['Content-Length'] = fsize
+ data = None
+ else:
+ if file is None:
+ file = open(filename, 'rb')
+ mtime = os.path.getmtime(filename)
+ fsize = os.path.getsize(filename)
+ headers['Content-Length'] = fsize
+ data = wrap_file(request.environ, file)
+
+ rv = current_app.response_class(data, mimetype=mimetype, headers=headers,
+ direct_passthrough=True)
+
+ if last_modified is not None:
+ rv.last_modified = last_modified
+ elif mtime is not None:
+ rv.last_modified = mtime
+
+ rv.cache_control.public = True
+ if cache_timeout is None:
+ cache_timeout = current_app.get_send_file_max_age(filename)
+ if cache_timeout is not None:
+ rv.cache_control.max_age = cache_timeout
+ rv.expires = int(time() + cache_timeout)
+
+ if add_etags and filename is not None:
+ from warnings import warn
+
+ try:
+ rv.set_etag('%s-%s-%s' % (
+ os.path.getmtime(filename),
+ os.path.getsize(filename),
+ adler32(
+ filename.encode('utf-8') if isinstance(filename, text_type)
+ else filename
+ ) & 0xffffffff
+ ))
+ except OSError:
+ warn('Access %s failed, maybe it does not exist, so ignore etags in '
+ 'headers' % filename, stacklevel=2)
+
+ if conditional:
+ if callable(getattr(Range, 'to_content_range_header', None)):
+ # Werkzeug supports Range Requests
+ # Remove this test when support for Werkzeug <0.12 is dropped
+ try:
+ rv = rv.make_conditional(request, accept_ranges=True,
+ complete_length=fsize)
+ except RequestedRangeNotSatisfiable:
+ file.close()
+ raise
+ else:
+ rv = rv.make_conditional(request)
+ # make sure we don't send x-sendfile for servers that
+ # ignore the 304 status code for x-sendfile.
+ if rv.status_code == 304:
+ rv.headers.pop('x-sendfile', None)
+ return rv
+
+
+def safe_join(directory, *pathnames):
+ """Safely join `directory` and zero or more untrusted `pathnames`
+ components.
+
+ Example usage::
+
+ @app.route('/wiki/<path:filename>')
+ def wiki_page(filename):
+ filename = safe_join(app.config['WIKI_FOLDER'], filename)
+ with open(filename, 'rb') as fd:
+ content = fd.read() # Read and process the file content...
+
+ :param directory: the trusted base directory.
+ :param pathnames: the untrusted pathnames relative to that directory.
+ :raises: :class:`~werkzeug.exceptions.NotFound` if one or more passed
+ paths fall out of its boundaries.
+ """
+
+ parts = [directory]
+
+ for filename in pathnames:
+ if filename != '':
+ filename = posixpath.normpath(filename)
+
+ if (
+ any(sep in filename for sep in _os_alt_seps)
+ or os.path.isabs(filename)
+ or filename == '..'
+ or filename.startswith('../')
+ ):
+ raise NotFound()
+
+ parts.append(filename)
+
+ return posixpath.join(*parts)
+
+
+def send_from_directory(directory, filename, **options):
+ """Send a file from a given directory with :func:`send_file`. This
+ is a secure way to quickly expose static files from an upload folder
+ or something similar.
+
+ Example usage::
+
+ @app.route('/uploads/<path:filename>')
+ def download_file(filename):
+ return send_from_directory(app.config['UPLOAD_FOLDER'],
+ filename, as_attachment=True)
+
+ .. admonition:: Sending files and Performance
+
+ It is strongly recommended to activate either ``X-Sendfile`` support in
+ your webserver or (if no authentication happens) to tell the webserver
+ to serve files for the given path on its own without calling into the
+ web application for improved performance.
+
+ .. versionadded:: 0.5
+
+ :param directory: the directory where all the files are stored.
+ :param filename: the filename relative to that directory to
+ download.
+ :param options: optional keyword arguments that are directly
+ forwarded to :func:`send_file`.
+ """
+ filename = safe_join(directory, filename)
+ if not os.path.isabs(filename):
+ filename = os.path.join(current_app.root_path, filename)
+ try:
+ if not os.path.isfile(filename):
+ raise NotFound()
+ except (TypeError, ValueError):
+ raise BadRequest()
+ options.setdefault('conditional', True)
+ return send_file(filename, **options)
+
+
+def get_root_path(import_name):
+ """Returns the path to a package or cwd if that cannot be found. This
+ returns the path of a package or the folder that contains a module.
+
+ Not to be confused with the package path returned by :func:`find_package`.
+ """
+ # Module already imported and has a file attribute. Use that first.
+ mod = sys.modules.get(import_name)
+ if mod is not None and hasattr(mod, '__file__'):
+ return os.path.dirname(os.path.abspath(mod.__file__))
+
+ # Next attempt: check the loader.
+ loader = pkgutil.get_loader(import_name)
+
+ # Loader does not exist or we're referring to an unloaded main module
+ # or a main module without path (interactive sessions), go with the
+ # current working directory.
+ if loader is None or import_name == '__main__':
+ return os.getcwd()
+
+ # For .egg, zipimporter does not have get_filename until Python 2.7.
+ # Some other loaders might exhibit the same behavior.
+ if hasattr(loader, 'get_filename'):
+ filepath = loader.get_filename(import_name)
+ else:
+ # Fall back to imports.
+ __import__(import_name)
+ mod = sys.modules[import_name]
+ filepath = getattr(mod, '__file__', None)
+
+ # If we don't have a filepath it might be because we are a
+ # namespace package. In this case we pick the root path from the
+ # first module that is contained in our package.
+ if filepath is None:
+ raise RuntimeError('No root path can be found for the provided '
+ 'module "%s". This can happen because the '
+ 'module came from an import hook that does '
+ 'not provide file name information or because '
+ 'it\'s a namespace package. In this case '
+ 'the root path needs to be explicitly '
+ 'provided.' % import_name)
+
+ # filepath is import_name.py for a module, or __init__.py for a package.
+ return os.path.dirname(os.path.abspath(filepath))
+
+
+def _matching_loader_thinks_module_is_package(loader, mod_name):
+ """Given the loader that loaded a module and the module this function
+ attempts to figure out if the given module is actually a package.
+ """
+ # If the loader can tell us if something is a package, we can
+ # directly ask the loader.
+ if hasattr(loader, 'is_package'):
+ return loader.is_package(mod_name)
+ # importlib's namespace loaders do not have this functionality but
+ # all the modules it loads are packages, so we can take advantage of
+ # this information.
+ elif (loader.__class__.__module__ == '_frozen_importlib' and
+ loader.__class__.__name__ == 'NamespaceLoader'):
+ return True
+ # Otherwise we need to fail with an error that explains what went
+ # wrong.
+ raise AttributeError(
+ ('%s.is_package() method is missing but is required by Flask for '
+ 'PEP 302 import hooks. If you do not use import hooks and '
+ 'you encounter this error please file a bug against Flask.') %
+ loader.__class__.__name__)
+
+
+def find_package(import_name):
+ """Finds a package and returns the prefix (or None if the package is
+ not installed) as well as the folder that contains the package or
+ module as a tuple. The package path returned is the module that would
+ have to be added to the pythonpath in order to make it possible to
+ import the module. The prefix is the path below which a UNIX like
+ folder structure exists (lib, share etc.).
+ """
+ root_mod_name = import_name.split('.')[0]
+ loader = pkgutil.get_loader(root_mod_name)
+ if loader is None or import_name == '__main__':
+ # import name is not found, or interactive/main module
+ package_path = os.getcwd()
+ else:
+ # For .egg, zipimporter does not have get_filename until Python 2.7.
+ if hasattr(loader, 'get_filename'):
+ filename = loader.get_filename(root_mod_name)
+ elif hasattr(loader, 'archive'):
+ # zipimporter's loader.archive points to the .egg or .zip
+ # archive filename is dropped in call to dirname below.
+ filename = loader.archive
+ else:
+ # At least one loader is missing both get_filename and archive:
+ # Google App Engine's HardenedModulesHook
+ #
+ # Fall back to imports.
+ __import__(import_name)
+ filename = sys.modules[import_name].__file__
+ package_path = os.path.abspath(os.path.dirname(filename))
+
+ # In case the root module is a package we need to chop off the
+ # rightmost part. This needs to go through a helper function
+ # because of python 3.3 namespace packages.
+ if _matching_loader_thinks_module_is_package(
+ loader, root_mod_name):
+ package_path = os.path.dirname(package_path)
+
+ site_parent, site_folder = os.path.split(package_path)
+ py_prefix = os.path.abspath(sys.prefix)
+ if package_path.startswith(py_prefix):
+ return py_prefix, package_path
+ elif site_folder.lower() == 'site-packages':
+ parent, folder = os.path.split(site_parent)
+ # Windows like installations
+ if folder.lower() == 'lib':
+ base_dir = parent
+ # UNIX like installations
+ elif os.path.basename(parent).lower() == 'lib':
+ base_dir = os.path.dirname(parent)
+ else:
+ base_dir = site_parent
+ return base_dir, package_path
+ return None, package_path
+
+
+class locked_cached_property(object):
+ """A decorator that converts a function into a lazy property. The
+ function wrapped is called the first time to retrieve the result
+ and then that calculated result is used the next time you access
+ the value. Works like the one in Werkzeug but has a lock for
+ thread safety.
+ """
+
+ def __init__(self, func, name=None, doc=None):
+ self.__name__ = name or func.__name__
+ self.__module__ = func.__module__
+ self.__doc__ = doc or func.__doc__
+ self.func = func
+ self.lock = RLock()
+
+ def __get__(self, obj, type=None):
+ if obj is None:
+ return self
+ with self.lock:
+ value = obj.__dict__.get(self.__name__, _missing)
+ if value is _missing:
+ value = self.func(obj)
+ obj.__dict__[self.__name__] = value
+ return value
+
+
+class _PackageBoundObject(object):
+
+ def __init__(self, import_name, template_folder=None, root_path=None):
+ #: The name of the package or module. Do not change this once
+ #: it was set by the constructor.
+ self.import_name = import_name
+
+ #: location of the templates. ``None`` if templates should not be
+ #: exposed.
+ self.template_folder = template_folder
+
+ if root_path is None:
+ root_path = get_root_path(self.import_name)
+
+ #: Where is the app root located?
+ self.root_path = root_path
+
+ self._static_folder = None
+ self._static_url_path = None
+
+ def _get_static_folder(self):
+ if self._static_folder is not None:
+ return os.path.join(self.root_path, self._static_folder)
+ def _set_static_folder(self, value):
+ self._static_folder = value
+ static_folder = property(_get_static_folder, _set_static_folder, doc='''
+ The absolute path to the configured static folder.
+ ''')
+ del _get_static_folder, _set_static_folder
+
+ def _get_static_url_path(self):
+ if self._static_url_path is not None:
+ return self._static_url_path
+ if self.static_folder is not None:
+ return '/' + os.path.basename(self.static_folder)
+ def _set_static_url_path(self, value):
+ self._static_url_path = value
+ static_url_path = property(_get_static_url_path, _set_static_url_path)
+ del _get_static_url_path, _set_static_url_path
+
+ @property
+ def has_static_folder(self):
+ """This is ``True`` if the package bound object's container has a
+ folder for static files.
+
+ .. versionadded:: 0.5
+ """
+ return self.static_folder is not None
+
+ @locked_cached_property
+ def jinja_loader(self):
+ """The Jinja loader for this package bound object.
+
+ .. versionadded:: 0.5
+ """
+ if self.template_folder is not None:
+ return FileSystemLoader(os.path.join(self.root_path,
+ self.template_folder))
+
+ def get_send_file_max_age(self, filename):
+ """Provides default cache_timeout for the :func:`send_file` functions.
+
+ By default, this function returns ``SEND_FILE_MAX_AGE_DEFAULT`` from
+ the configuration of :data:`~flask.current_app`.
+
+ Static file functions such as :func:`send_from_directory` use this
+ function, and :func:`send_file` calls this function on
+ :data:`~flask.current_app` when the given cache_timeout is ``None``. If a
+ cache_timeout is given in :func:`send_file`, that timeout is used;
+ otherwise, this method is called.
+
+ This allows subclasses to change the behavior when sending files based
+ on the filename. For example, to set the cache timeout for .js files
+ to 60 seconds::
+
+ class MyFlask(flask.Flask):
+ def get_send_file_max_age(self, name):
+ if name.lower().endswith('.js'):
+ return 60
+ return flask.Flask.get_send_file_max_age(self, name)
+
+ .. versionadded:: 0.9
+ """
+ return total_seconds(current_app.send_file_max_age_default)
+
+ def send_static_file(self, filename):
+ """Function used internally to send static files from the static
+ folder to the browser.
+
+ .. versionadded:: 0.5
+ """
+ if not self.has_static_folder:
+ raise RuntimeError('No static folder for this object')
+ # Ensure get_send_file_max_age is called in all cases.
+ # Here, we ensure get_send_file_max_age is called for Blueprints.
+ cache_timeout = self.get_send_file_max_age(filename)
+ return send_from_directory(self.static_folder, filename,
+ cache_timeout=cache_timeout)
+
+ def open_resource(self, resource, mode='rb'):
+ """Opens a resource from the application's resource folder. To see
+ how this works, consider the following folder structure::
+
+ /myapplication.py
+ /schema.sql
+ /static
+ /style.css
+ /templates
+ /layout.html
+ /index.html
+
+ If you want to open the :file:`schema.sql` file you would do the
+ following::
+
+ with app.open_resource('schema.sql') as f:
+ contents = f.read()
+ do_something_with(contents)
+
+ :param resource: the name of the resource. To access resources within
+ subfolders use forward slashes as separator.
+ :param mode: resource file opening mode, default is 'rb'.
+ """
+ if mode not in ('r', 'rb'):
+ raise ValueError('Resources can only be opened for reading')
+ return open(os.path.join(self.root_path, resource), mode)
+
+
+def total_seconds(td):
+ """Returns the total seconds from a timedelta object.
+
+ :param timedelta td: the timedelta to be converted in seconds
+
+ :returns: number of seconds
+ :rtype: int
+ """
+ return td.days * 60 * 60 * 24 + td.seconds
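Most of the helpers above expect a request or application context to be active; the sketch below exercises a few of them under ``test_request_context`` (the routes, folder and messages are illustrative assumptions)::

    from flask import (Flask, url_for, flash, get_flashed_messages,
                       send_from_directory)

    app = Flask(__name__)
    app.secret_key = 'dev'  # flash() stores messages in the session

    @app.route('/user/<name>')
    def profile(name):
        return 'Hello %s' % name

    @app.route('/files/<path:filename>')
    def files(filename):
        # send_from_directory() runs the name through safe_join(), so
        # '../'-style paths are rejected with a 404.
        return send_from_directory('uploads', filename, as_attachment=True)

    with app.test_request_context('/'):
        print(url_for('profile', name='alice'))              # /user/alice
        print(url_for('files', filename='report.pdf'))       # /files/report.pdf
        flash('Saved.', 'info')
        print(get_flashed_messages(with_categories=True))    # [('info', 'Saved.')]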
diff --git a/appWSM/lib/python2.7/site-packages/flask/json.py b/appWSM/lib/python2.7/site-packages/flask/json.py
new file mode 100644
index 0000000000000000000000000000000000000000..16e0c295613c959ea8861251fd82f6bea1afbcfd
--- /dev/null
+++ b/appWSM/lib/python2.7/site-packages/flask/json.py
@@ -0,0 +1,269 @@
+# -*- coding: utf-8 -*-
+"""
+ flask.json
+ ~~~~~~~~~~
+
+ Implementation helpers for the JSON support in Flask.
+
+ :copyright: (c) 2015 by Armin Ronacher.
+ :license: BSD, see LICENSE for more details.
+"""
+import io
+import uuid
+from datetime import date
+from .globals import current_app, request
+from ._compat import text_type, PY2
+
+from werkzeug.http import http_date
+from jinja2 import Markup
+
+# Use the same json implementation as itsdangerous on which we
+# depend anyways.
+from itsdangerous import json as _json
+
+
+# Figure out if simplejson escapes slashes. This behavior was changed
+# from one version to another without reason.
+_slash_escape = '\\/' not in _json.dumps('/')
+
+
+__all__ = ['dump', 'dumps', 'load', 'loads', 'htmlsafe_dump',
+ 'htmlsafe_dumps', 'JSONDecoder', 'JSONEncoder',
+ 'jsonify']
+
+
+def _wrap_reader_for_text(fp, encoding):
+ if isinstance(fp.read(0), bytes):
+ fp = io.TextIOWrapper(io.BufferedReader(fp), encoding)
+ return fp
+
+
+def _wrap_writer_for_text(fp, encoding):
+ try:
+ fp.write('')
+ except TypeError:
+ fp = io.TextIOWrapper(fp, encoding)
+ return fp
+
+
+class JSONEncoder(_json.JSONEncoder):
+ """The default Flask JSON encoder. This one extends the default simplejson
+ encoder by also supporting ``datetime`` objects, ``UUID`` as well as
+ ``Markup`` objects which are serialized as RFC 822 datetime strings (same
+ as the HTTP date format). In order to support more data types override the
+ :meth:`default` method.
+ """
+
+ def default(self, o):
+ """Implement this method in a subclass such that it returns a
+ serializable object for ``o``, or calls the base implementation (to
+ raise a :exc:`TypeError`).
+
+ For example, to support arbitrary iterators, you could implement
+ default like this::
+
+ def default(self, o):
+ try:
+ iterable = iter(o)
+ except TypeError:
+ pass
+ else:
+ return list(iterable)
+ return JSONEncoder.default(self, o)
+ """
+ if isinstance(o, date):
+ return http_date(o.timetuple())
+ if isinstance(o, uuid.UUID):
+ return str(o)
+ if hasattr(o, '__html__'):
+ return text_type(o.__html__())
+ return _json.JSONEncoder.default(self, o)
+
+
+class JSONDecoder(_json.JSONDecoder):
+ """The default JSON decoder. This one does not change the behavior from
+ the default simplejson decoder. Consult the :mod:`json` documentation
+ for more information. This decoder is not only used for the load
+ functions of this module but also :attr:`~flask.Request`.
+ """
+
+
+def _dump_arg_defaults(kwargs):
+ """Inject default arguments for dump functions."""
+ if current_app:
+ kwargs.setdefault('cls', current_app.json_encoder)
+ if not current_app.config['JSON_AS_ASCII']:
+ kwargs.setdefault('ensure_ascii', False)
+ kwargs.setdefault('sort_keys', current_app.config['JSON_SORT_KEYS'])
+ else:
+ kwargs.setdefault('sort_keys', True)
+ kwargs.setdefault('cls', JSONEncoder)
+
+
+def _load_arg_defaults(kwargs):
+ """Inject default arguments for load functions."""
+ if current_app:
+ kwargs.setdefault('cls', current_app.json_decoder)
+ else:
+ kwargs.setdefault('cls', JSONDecoder)
+
+
+def dumps(obj, **kwargs):
+ """Serialize ``obj`` to a JSON formatted ``str`` by using the application's
+ configured encoder (:attr:`~flask.Flask.json_encoder`) if there is an
+ application on the stack.
+
+ This function can return ``unicode`` strings or ascii-only bytestrings by
+ default which coerce into unicode strings automatically. That behavior by
+ default is controlled by the ``JSON_AS_ASCII`` configuration variable
+ and can be overridden by the simplejson ``ensure_ascii`` parameter.
+ """
+ _dump_arg_defaults(kwargs)
+ encoding = kwargs.pop('encoding', None)
+ rv = _json.dumps(obj, **kwargs)
+ if encoding is not None and isinstance(rv, text_type):
+ rv = rv.encode(encoding)
+ return rv
+
+
+def dump(obj, fp, **kwargs):
+ """Like :func:`dumps` but writes into a file object."""
+ _dump_arg_defaults(kwargs)
+ encoding = kwargs.pop('encoding', None)
+ if encoding is not None:
+ fp = _wrap_writer_for_text(fp, encoding)
+ _json.dump(obj, fp, **kwargs)
+
+
+def loads(s, **kwargs):
+ """Unserialize a JSON object from a string ``s`` by using the application's
+ configured decoder (:attr:`~flask.Flask.json_decoder`) if there is an
+ application on the stack.
+ """
+ _load_arg_defaults(kwargs)
+ if isinstance(s, bytes):
+ s = s.decode(kwargs.pop('encoding', None) or 'utf-8')
+ return _json.loads(s, **kwargs)
+
+
+def load(fp, **kwargs):
+ """Like :func:`loads` but reads from a file object.
+ """
+ _load_arg_defaults(kwargs)
+ if not PY2:
+ fp = _wrap_reader_for_text(fp, kwargs.pop('encoding', None) or 'utf-8')
+ return _json.load(fp, **kwargs)
+
+
+def htmlsafe_dumps(obj, **kwargs):
+ """Works exactly like :func:`dumps` but is safe for use in ``