diff --git a/cps/web.py b/cps/web.py index fdf5d1b7..5f027562 100755 --- a/cps/web.py +++ b/cps/web.py @@ -18,9 +18,8 @@ from sqlalchemy.exc import IntegrityError from sqlalchemy import __version__ as sqlalchemyVersion from math import ceil from flask_login import LoginManager, login_user, logout_user, login_required, current_user -from flask_login import __version__ as flask_loginVersion from flask_principal import Principal, Identity, AnonymousIdentity, identity_changed -from flask_login import __version__ as flask_principalVersion +from flask_principal import __version__ as flask_principalVersion from flask_babel import Babel from flask_babel import gettext as _ import requests @@ -48,6 +47,11 @@ from shutil import move, copyfile from tornado.ioloop import IOLoop from tornado import version as tornadoVersion +try: + from flask_login import __version__ as flask_loginVersion +except ImportError, e: + from flask_login.__about__ import __version__ as flask_loginVersion + try: from wand.image import Image diff --git a/readme.md b/readme.md index c79a734e..8108054c 100755 --- a/readme.md +++ b/readme.md @@ -1,4 +1,4 @@ -##About +## About Calibre Web is a web app providing a clean interface for browsing, reading and downloading eBooks using an existing [Calibre](https://calibre-ebook.com) database. @@ -6,7 +6,8 @@ Calibre Web is a web app providing a clean interface for browsing, reading and d ![screenshot](https://raw.githubusercontent.com/janeczku/docker-calibre-web/master/screenshot.png) -##Features +## Features + - Bootstrap 3 HTML5 interface - full graphical setup - User management @@ -28,13 +29,14 @@ Calibre Web is a web app providing a clean interface for browsing, reading and d ## Quick start -1. Execute the command: `python cps.py` (or `nohup python cps.py` - recommended if you want to exit the terminal window) -2. Point your browser to `http://localhost:8083` or `http://localhost:8083/opds` for the OPDS catalog -3. Set `Location of Calibre database` to the path of the folder where your Calibre library (metadata.db) lives, push "submit" button -4. Go to Login page +1. Install required dependencies by executing `pip install -r requirements.txt` +2. Execute the command: `python cps.py` (or `nohup python cps.py` - recommended if you want to exit the terminal window) +3. Point your browser to `http://localhost:8083` or `http://localhost:8083/opds` for the OPDS catalog +4. Set `Location of Calibre database` to the path of the folder where your Calibre library (metadata.db) lives, push "submit" button +5. Go to Login page -**Default admin login:** -*Username:* admin +**Default admin login:** +*Username:* admin *Password:* admin123 ## Runtime Configuration Options @@ -56,10 +58,10 @@ Tick to enable uploading of PDF, epub, FB2. This requires the imagemagick librar ## Requirements Python 2.7+ - -Optionally, to enable on-the-fly conversion from EPUB to MOBI when using the send-to-kindle feature: -[Download](http://www.amazon.com/gp/feature.html?docId=1000765211) Amazon's KindleGen tool for your platform and place the binary named as `kindlegen` in the `vendor` folder. +Optionally, to enable on-the-fly conversion from EPUB to MOBI when using the send-to-kindle feature: + +[Download](http://www.amazon.com/gp/feature.html?docId=1000765211) Amazon's KindleGen tool for your platform and place the binary named as `kindlegen` in the `vendor` folder. ## Docker image @@ -131,4 +133,4 @@ Replace the user and ExecStart with your user and foldernames. 
`sudo systemctl enable cps.service` -enables the service. +enables the service. diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 00000000..a140cbb5 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,12 @@ +Babel>=1.3 +Flask>=0.11 +Flask-Babel==0.11.1 +Flask-Login>=0.3.2 +Flask-Principal>=0.3.2 +iso-639>=0.4.5 +PyPDF2==1.26.0 +pytz>=2016.10 +requests>=2.11.1 +SQLAlchemy>=0.8.4 +tornado>=4.4.2 +Wand>=0.4.4 diff --git a/vendor/pytz/zoneinfo/Africa/__init__.py b/vendor/.gitempty similarity index 100% rename from vendor/pytz/zoneinfo/Africa/__init__.py rename to vendor/.gitempty diff --git a/vendor/LICENSE_flask_login b/vendor/LICENSE_flask_login deleted file mode 100644 index 04463812..00000000 --- a/vendor/LICENSE_flask_login +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2011 Matthew Frazier - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/LICENSE_flask_principal b/vendor/LICENSE_flask_principal deleted file mode 100644 index 85522fd0..00000000 --- a/vendor/LICENSE_flask_principal +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2012 Ali Afshar - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/LICENSE_itsdangerous b/vendor/LICENSE_itsdangerous deleted file mode 100644 index 183d7f6d..00000000 --- a/vendor/LICENSE_itsdangerous +++ /dev/null @@ -1,31 +0,0 @@ -Copyright (c) 2011 by Armin Ronacher and the Django Software Foundation. - -Some rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - - * The names of the contributors may not be used to endorse or - promote products derived from this software without specific - prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/PyPDF2/__init__.py b/vendor/PyPDF2/__init__.py deleted file mode 100755 index f458c0ea..00000000 --- a/vendor/PyPDF2/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .pdf import PdfFileReader, PdfFileWriter -from .merger import PdfFileMerger -from .pagerange import PageRange, parse_filename_page_ranges -from ._version import __version__ -__all__ = ["pdf", "PdfFileMerger"] diff --git a/vendor/PyPDF2/_version.py b/vendor/PyPDF2/_version.py deleted file mode 100755 index 5fc7041e..00000000 --- a/vendor/PyPDF2/_version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = '1.26.0' diff --git a/vendor/PyPDF2/filters.py b/vendor/PyPDF2/filters.py deleted file mode 100755 index 3717fd4c..00000000 --- a/vendor/PyPDF2/filters.py +++ /dev/null @@ -1,362 +0,0 @@ -# vim: sw=4:expandtab:foldmethod=marker -# -# Copyright (c) 2006, Mathieu Fenniak -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright notice, -# this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# * The name of the author may not be used to endorse or promote products -# derived from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. - - -""" -Implementation of stream filters for PDF. -""" -__author__ = "Mathieu Fenniak" -__author_email__ = "biziqe@mathieu.fenniak.net" - -from .utils import PdfReadError, ord_, chr_ -from sys import version_info -if version_info < ( 3, 0 ): - from cStringIO import StringIO -else: - from io import StringIO - import struct - -try: - import zlib - - def decompress(data): - return zlib.decompress(data) - - def compress(data): - return zlib.compress(data) - -except ImportError: - # Unable to import zlib. Attempt to use the System.IO.Compression - # library from the .NET framework. (IronPython only) - import System - from System import IO, Collections, Array - - def _string_to_bytearr(buf): - retval = Array.CreateInstance(System.Byte, len(buf)) - for i in range(len(buf)): - retval[i] = ord(buf[i]) - return retval - - def _bytearr_to_string(bytes): - retval = "" - for i in range(bytes.Length): - retval += chr(bytes[i]) - return retval - - def _read_bytes(stream): - ms = IO.MemoryStream() - buf = Array.CreateInstance(System.Byte, 2048) - while True: - bytes = stream.Read(buf, 0, buf.Length) - if bytes == 0: - break - else: - ms.Write(buf, 0, bytes) - retval = ms.ToArray() - ms.Close() - return retval - - def decompress(data): - bytes = _string_to_bytearr(data) - ms = IO.MemoryStream() - ms.Write(bytes, 0, bytes.Length) - ms.Position = 0 # fseek 0 - gz = IO.Compression.DeflateStream(ms, IO.Compression.CompressionMode.Decompress) - bytes = _read_bytes(gz) - retval = _bytearr_to_string(bytes) - gz.Close() - return retval - - def compress(data): - bytes = _string_to_bytearr(data) - ms = IO.MemoryStream() - gz = IO.Compression.DeflateStream(ms, IO.Compression.CompressionMode.Compress, True) - gz.Write(bytes, 0, bytes.Length) - gz.Close() - ms.Position = 0 # fseek 0 - bytes = ms.ToArray() - retval = _bytearr_to_string(bytes) - ms.Close() - return retval - - -class FlateDecode(object): - def decode(data, decodeParms): - data = decompress(data) - predictor = 1 - if decodeParms: - try: - predictor = decodeParms.get("/Predictor", 1) - except AttributeError: - pass # usually an array with a null object was read - - # predictor 1 == no predictor - if predictor != 1: - columns = decodeParms["/Columns"] - # PNG prediction: - if predictor >= 10 and predictor <= 15: - output = StringIO() - # PNG prediction can vary from row to row - rowlength = columns + 1 - assert len(data) % rowlength == 0 - prev_rowdata = (0,) * rowlength - for row in range(len(data) // rowlength): - rowdata = [ord_(x) for x in data[(row*rowlength):((row+1)*rowlength)]] - filterByte = rowdata[0] - if filterByte == 0: - pass - elif filterByte == 1: - for i in range(2, rowlength): - rowdata[i] = (rowdata[i] + rowdata[i-1]) % 256 - elif filterByte == 2: - for i in range(1, rowlength): - rowdata[i] = (rowdata[i] + prev_rowdata[i]) % 256 - else: - # unsupported PNG filter - raise PdfReadError("Unsupported PNG filter %r" % filterByte) - prev_rowdata = rowdata - output.write(''.join([chr(x) for x in rowdata[1:]])) - data = 
output.getvalue() - else: - # unsupported predictor - raise PdfReadError("Unsupported flatedecode predictor %r" % predictor) - return data - decode = staticmethod(decode) - - def encode(data): - return compress(data) - encode = staticmethod(encode) - - -class ASCIIHexDecode(object): - def decode(data, decodeParms=None): - retval = "" - char = "" - x = 0 - while True: - c = data[x] - if c == ">": - break - elif c.isspace(): - x += 1 - continue - char += c - if len(char) == 2: - retval += chr(int(char, base=16)) - char = "" - x += 1 - assert char == "" - return retval - decode = staticmethod(decode) - - -class LZWDecode(object): - """Taken from: - http://www.java2s.com/Open-Source/Java-Document/PDF/PDF-Renderer/com/sun/pdfview/decode/LZWDecode.java.htm - """ - class decoder(object): - def __init__(self, data): - self.STOP=257 - self.CLEARDICT=256 - self.data=data - self.bytepos=0 - self.bitpos=0 - self.dict=[""]*4096 - for i in range(256): - self.dict[i]=chr(i) - self.resetDict() - - def resetDict(self): - self.dictlen=258 - self.bitspercode=9 - - def nextCode(self): - fillbits=self.bitspercode - value=0 - while fillbits>0 : - if self.bytepos >= len(self.data): - return -1 - nextbits=ord(self.data[self.bytepos]) - bitsfromhere=8-self.bitpos - if bitsfromhere>fillbits: - bitsfromhere=fillbits - value |= (((nextbits >> (8-self.bitpos-bitsfromhere)) & - (0xff >> (8-bitsfromhere))) << - (fillbits-bitsfromhere)) - fillbits -= bitsfromhere - self.bitpos += bitsfromhere - if self.bitpos >=8: - self.bitpos=0 - self.bytepos = self.bytepos+1 - return value - - def decode(self): - """ algorithm derived from: - http://www.rasip.fer.hr/research/compress/algorithms/fund/lz/lzw.html - and the PDFReference - """ - cW = self.CLEARDICT; - baos="" - while True: - pW = cW; - cW = self.nextCode(); - if cW == -1: - raise PdfReadError("Missed the stop code in LZWDecode!") - if cW == self.STOP: - break; - elif cW == self.CLEARDICT: - self.resetDict(); - elif pW == self.CLEARDICT: - baos+=self.dict[cW] - else: - if cW < self.dictlen: - baos += self.dict[cW] - p=self.dict[pW]+self.dict[cW][0] - self.dict[self.dictlen]=p - self.dictlen+=1 - else: - p=self.dict[pW]+self.dict[pW][0] - baos+=p - self.dict[self.dictlen] = p; - self.dictlen+=1 - if (self.dictlen >= (1 << self.bitspercode) - 1 and - self.bitspercode < 12): - self.bitspercode+=1 - return baos - - @staticmethod - def decode(data,decodeParams=None): - return LZWDecode.decoder(data).decode() - - -class ASCII85Decode(object): - def decode(data, decodeParms=None): - if version_info < ( 3, 0 ): - retval = "" - group = [] - x = 0 - hitEod = False - # remove all whitespace from data - data = [y for y in data if not (y in ' \n\r\t')] - while not hitEod: - c = data[x] - if len(retval) == 0 and c == "<" and data[x+1] == "~": - x += 2 - continue - #elif c.isspace(): - # x += 1 - # continue - elif c == 'z': - assert len(group) == 0 - retval += '\x00\x00\x00\x00' - x += 1 - continue - elif c == "~" and data[x+1] == ">": - if len(group) != 0: - # cannot have a final group of just 1 char - assert len(group) > 1 - cnt = len(group) - 1 - group += [ 85, 85, 85 ] - hitEod = cnt - else: - break - else: - c = ord(c) - 33 - assert c >= 0 and c < 85 - group += [ c ] - if len(group) >= 5: - b = group[0] * (85**4) + \ - group[1] * (85**3) + \ - group[2] * (85**2) + \ - group[3] * 85 + \ - group[4] - assert b < (2**32 - 1) - c4 = chr((b >> 0) % 256) - c3 = chr((b >> 8) % 256) - c2 = chr((b >> 16) % 256) - c1 = chr(b >> 24) - retval += (c1 + c2 + c3 + c4) - if hitEod: - retval = 
retval[:-4+hitEod] - group = [] - x += 1 - return retval - else: - if isinstance(data, str): - data = data.encode('ascii') - n = b = 0 - out = bytearray() - for c in data: - if ord('!') <= c and c <= ord('u'): - n += 1 - b = b*85+(c-33) - if n == 5: - out += struct.pack(b'>L',b) - n = b = 0 - elif c == ord('z'): - assert n == 0 - out += b'\0\0\0\0' - elif c == ord('~'): - if n: - for _ in range(5-n): - b = b*85+84 - out += struct.pack(b'>L',b)[:n-1] - break - return bytes(out) - decode = staticmethod(decode) - - -def decodeStreamData(stream): - from .generic import NameObject - filters = stream.get("/Filter", ()) - if len(filters) and not isinstance(filters[0], NameObject): - # we have a single filter instance - filters = (filters,) - data = stream._data - # If there is not data to decode we should not try to decode the data. - if data: - for filterType in filters: - if filterType == "/FlateDecode" or filterType == "/Fl": - data = FlateDecode.decode(data, stream.get("/DecodeParms")) - elif filterType == "/ASCIIHexDecode" or filterType == "/AHx": - data = ASCIIHexDecode.decode(data) - elif filterType == "/LZWDecode" or filterType == "/LZW": - data = LZWDecode.decode(data, stream.get("/DecodeParms")) - elif filterType == "/ASCII85Decode" or filterType == "/A85": - data = ASCII85Decode.decode(data) - elif filterType == "/Crypt": - decodeParams = stream.get("/DecodeParams", {}) - if "/Name" not in decodeParams and "/Type" not in decodeParams: - pass - else: - raise NotImplementedError("/Crypt filter with /Name or /Type not supported yet") - else: - # unsupported filter - raise NotImplementedError("unsupported filter %s" % filterType) - return data diff --git a/vendor/PyPDF2/generic.py b/vendor/PyPDF2/generic.py deleted file mode 100755 index c4332297..00000000 --- a/vendor/PyPDF2/generic.py +++ /dev/null @@ -1,1226 +0,0 @@ -# vim: sw=4:expandtab:foldmethod=marker -# -# Copyright (c) 2006, Mathieu Fenniak -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright notice, -# this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# * The name of the author may not be used to endorse or promote products -# derived from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. 
- - -""" -Implementation of generic PDF objects (dictionary, number, string, and so on) -""" -__author__ = "Mathieu Fenniak" -__author_email__ = "biziqe@mathieu.fenniak.net" - -import re -from .utils import readNonWhitespace, RC4_encrypt, skipOverComment -from .utils import b_, u_, chr_, ord_ -from .utils import PdfStreamError -import warnings -from . import filters -from . import utils -import decimal -import codecs -import sys -#import debugging - -ObjectPrefix = b_('/<[tf(n%') -NumberSigns = b_('+-') -IndirectPattern = re.compile(b_(r"(\d+)\s+(\d+)\s+R[^a-zA-Z]")) - - -def readObject(stream, pdf): - tok = stream.read(1) - stream.seek(-1, 1) # reset to start - idx = ObjectPrefix.find(tok) - if idx == 0: - # name object - return NameObject.readFromStream(stream, pdf) - elif idx == 1: - # hexadecimal string OR dictionary - peek = stream.read(2) - stream.seek(-2, 1) # reset to start - if peek == b_('<<'): - return DictionaryObject.readFromStream(stream, pdf) - else: - return readHexStringFromStream(stream) - elif idx == 2: - # array object - return ArrayObject.readFromStream(stream, pdf) - elif idx == 3 or idx == 4: - # boolean object - return BooleanObject.readFromStream(stream) - elif idx == 5: - # string object - return readStringFromStream(stream) - elif idx == 6: - # null object - return NullObject.readFromStream(stream) - elif idx == 7: - # comment - while tok not in (b_('\r'), b_('\n')): - tok = stream.read(1) - tok = readNonWhitespace(stream) - stream.seek(-1, 1) - return readObject(stream, pdf) - else: - # number object OR indirect reference - if tok in NumberSigns: - # number - return NumberObject.readFromStream(stream) - peek = stream.read(20) - stream.seek(-len(peek), 1) # reset to start - if IndirectPattern.match(peek) != None: - return IndirectObject.readFromStream(stream, pdf) - else: - return NumberObject.readFromStream(stream) - - -class PdfObject(object): - def getObject(self): - """Resolves indirect references.""" - return self - - -class NullObject(PdfObject): - def writeToStream(self, stream, encryption_key): - stream.write(b_("null")) - - def readFromStream(stream): - nulltxt = stream.read(4) - if nulltxt != b_("null"): - raise utils.PdfReadError("Could not read Null object") - return NullObject() - readFromStream = staticmethod(readFromStream) - - -class BooleanObject(PdfObject): - def __init__(self, value): - self.value = value - - def writeToStream(self, stream, encryption_key): - if self.value: - stream.write(b_("true")) - else: - stream.write(b_("false")) - - def readFromStream(stream): - word = stream.read(4) - if word == b_("true"): - return BooleanObject(True) - elif word == b_("fals"): - stream.read(1) - return BooleanObject(False) - else: - raise utils.PdfReadError('Could not read Boolean object') - readFromStream = staticmethod(readFromStream) - - -class ArrayObject(list, PdfObject): - def writeToStream(self, stream, encryption_key): - stream.write(b_("[")) - for data in self: - stream.write(b_(" ")) - data.writeToStream(stream, encryption_key) - stream.write(b_(" ]")) - - def readFromStream(stream, pdf): - arr = ArrayObject() - tmp = stream.read(1) - if tmp != b_("["): - raise utils.PdfReadError("Could not read array") - while True: - # skip leading whitespace - tok = stream.read(1) - while tok.isspace(): - tok = stream.read(1) - stream.seek(-1, 1) - # check for array ending - peekahead = stream.read(1) - if peekahead == b_("]"): - break - stream.seek(-1, 1) - # read and append obj - arr.append(readObject(stream, pdf)) - return arr - readFromStream = 
staticmethod(readFromStream) - - -class IndirectObject(PdfObject): - def __init__(self, idnum, generation, pdf): - self.idnum = idnum - self.generation = generation - self.pdf = pdf - - def getObject(self): - return self.pdf.getObject(self).getObject() - - def __repr__(self): - return "IndirectObject(%r, %r)" % (self.idnum, self.generation) - - def __eq__(self, other): - return ( - other != None and - isinstance(other, IndirectObject) and - self.idnum == other.idnum and - self.generation == other.generation and - self.pdf is other.pdf - ) - - def __ne__(self, other): - return not self.__eq__(other) - - def writeToStream(self, stream, encryption_key): - stream.write(b_("%s %s R" % (self.idnum, self.generation))) - - def readFromStream(stream, pdf): - idnum = b_("") - while True: - tok = stream.read(1) - if not tok: - # stream has truncated prematurely - raise PdfStreamError("Stream has ended unexpectedly") - if tok.isspace(): - break - idnum += tok - generation = b_("") - while True: - tok = stream.read(1) - if not tok: - # stream has truncated prematurely - raise PdfStreamError("Stream has ended unexpectedly") - if tok.isspace(): - if not generation: - continue - break - generation += tok - r = readNonWhitespace(stream) - if r != b_("R"): - raise utils.PdfReadError("Error reading indirect object reference at byte %s" % utils.hexStr(stream.tell())) - return IndirectObject(int(idnum), int(generation), pdf) - readFromStream = staticmethod(readFromStream) - - -class FloatObject(decimal.Decimal, PdfObject): - def __new__(cls, value="0", context=None): - try: - return decimal.Decimal.__new__(cls, utils.str_(value), context) - except: - return decimal.Decimal.__new__(cls, str(value)) - - def __repr__(self): - if self == self.to_integral(): - return str(self.quantize(decimal.Decimal(1))) - else: - # Standard formatting adds useless extraneous zeros. - o = "%.5f" % self - # Remove the zeros. - while o and o[-1] == '0': - o = o[:-1] - return o - - def as_numeric(self): - return float(b_(repr(self))) - - def writeToStream(self, stream, encryption_key): - stream.write(b_(repr(self))) - - -class NumberObject(int, PdfObject): - NumberPattern = re.compile(b_('[^+-.0-9]')) - ByteDot = b_(".") - - def __new__(cls, value): - val = int(value) - try: - return int.__new__(cls, val) - except OverflowError: - return int.__new__(cls, 0) - - def as_numeric(self): - return int(b_(repr(self))) - - def writeToStream(self, stream, encryption_key): - stream.write(b_(repr(self))) - - def readFromStream(stream): - num = utils.readUntilRegex(stream, NumberObject.NumberPattern) - if num.find(NumberObject.ByteDot) != -1: - return FloatObject(num) - else: - return NumberObject(num) - readFromStream = staticmethod(readFromStream) - - -## -# Given a string (either a "str" or "unicode"), create a ByteStringObject or a -# TextStringObject to represent the string. -def createStringObject(string): - if isinstance(string, utils.string_type): - return TextStringObject(string) - elif isinstance(string, utils.bytes_type): - try: - if string.startswith(codecs.BOM_UTF16_BE): - retval = TextStringObject(string.decode("utf-16")) - retval.autodetect_utf16 = True - return retval - else: - # This is probably a big performance hit here, but we need to - # convert string objects into the text/unicode-aware version if - # possible... and the only way to check if that's possible is - # to try. Some strings are strings, some are just byte arrays. 
- retval = TextStringObject(decode_pdfdocencoding(string)) - retval.autodetect_pdfdocencoding = True - return retval - except UnicodeDecodeError: - return ByteStringObject(string) - else: - raise TypeError("createStringObject should have str or unicode arg") - - -def readHexStringFromStream(stream): - stream.read(1) - txt = "" - x = b_("") - while True: - tok = readNonWhitespace(stream) - if not tok: - # stream has truncated prematurely - raise PdfStreamError("Stream has ended unexpectedly") - if tok == b_(">"): - break - x += tok - if len(x) == 2: - txt += chr(int(x, base=16)) - x = b_("") - if len(x) == 1: - x += b_("0") - if len(x) == 2: - txt += chr(int(x, base=16)) - return createStringObject(b_(txt)) - - -def readStringFromStream(stream): - tok = stream.read(1) - parens = 1 - txt = b_("") - while True: - tok = stream.read(1) - if not tok: - # stream has truncated prematurely - raise PdfStreamError("Stream has ended unexpectedly") - if tok == b_("("): - parens += 1 - elif tok == b_(")"): - parens -= 1 - if parens == 0: - break - elif tok == b_("\\"): - tok = stream.read(1) - if tok == b_("n"): - tok = b_("\n") - elif tok == b_("r"): - tok = b_("\r") - elif tok == b_("t"): - tok = b_("\t") - elif tok == b_("b"): - tok = b_("\b") - elif tok == b_("f"): - tok = b_("\f") - elif tok == b_("c"): - tok = b_("\c") - elif tok == b_("("): - tok = b_("(") - elif tok == b_(")"): - tok = b_(")") - elif tok == b_("/"): - tok = b_("/") - elif tok == b_("\\"): - tok = b_("\\") - elif tok in (b_(" "), b_("/"), b_("%"), b_("<"), b_(">"), b_("["), - b_("]"), b_("#"), b_("_"), b_("&"), b_('$')): - # odd/unnessecary escape sequences we have encountered - tok = b_(tok) - elif tok.isdigit(): - # "The number ddd may consist of one, two, or three - # octal digits; high-order overflow shall be ignored. - # Three octal digits shall be used, with leading zeros - # as needed, if the next character of the string is also - # a digit." (PDF reference 7.3.4.2, p 16) - for i in range(2): - ntok = stream.read(1) - if ntok.isdigit(): - tok += ntok - else: - break - tok = b_(chr(int(tok, base=8))) - elif tok in b_("\n\r"): - # This case is hit when a backslash followed by a line - # break occurs. If it's a multi-char EOL, consume the - # second character: - tok = stream.read(1) - if not tok in b_("\n\r"): - stream.seek(-1, 1) - # Then don't add anything to the actual string, since this - # line break was escaped: - tok = b_('') - else: - raise utils.PdfReadError(r"Unexpected escaped string: %s" % tok) - txt += tok - return createStringObject(txt) - - -## -# Represents a string object where the text encoding could not be determined. -# This occurs quite often, as the PDF spec doesn't provide an alternate way to -# represent strings -- for example, the encryption data stored in files (like -# /O) is clearly not text, but is still stored in a "String" object. -class ByteStringObject(utils.bytes_type, PdfObject): - - ## - # For compatibility with TextStringObject.original_bytes. This method - # returns self. - original_bytes = property(lambda self: self) - - def writeToStream(self, stream, encryption_key): - bytearr = self - if encryption_key: - bytearr = RC4_encrypt(encryption_key, bytearr) - stream.write(b_("<")) - stream.write(utils.hexencode(bytearr)) - stream.write(b_(">")) - - -## -# Represents a string object that has been decoded into a real unicode string. -# If read from a PDF document, this string appeared to match the -# PDFDocEncoding, or contained a UTF-16BE BOM mark to cause UTF-16 decoding to -# occur. 
-class TextStringObject(utils.string_type, PdfObject): - autodetect_pdfdocencoding = False - autodetect_utf16 = False - - ## - # It is occasionally possible that a text string object gets created where - # a byte string object was expected due to the autodetection mechanism -- - # if that occurs, this "original_bytes" property can be used to - # back-calculate what the original encoded bytes were. - original_bytes = property(lambda self: self.get_original_bytes()) - - def get_original_bytes(self): - # We're a text string object, but the library is trying to get our raw - # bytes. This can happen if we auto-detected this string as text, but - # we were wrong. It's pretty common. Return the original bytes that - # would have been used to create this object, based upon the autodetect - # method. - if self.autodetect_utf16: - return codecs.BOM_UTF16_BE + self.encode("utf-16be") - elif self.autodetect_pdfdocencoding: - return encode_pdfdocencoding(self) - else: - raise Exception("no information about original bytes") - - def writeToStream(self, stream, encryption_key): - # Try to write the string out as a PDFDocEncoding encoded string. It's - # nicer to look at in the PDF file. Sadly, we take a performance hit - # here for trying... - try: - bytearr = encode_pdfdocencoding(self) - except UnicodeEncodeError: - bytearr = codecs.BOM_UTF16_BE + self.encode("utf-16be") - if encryption_key: - bytearr = RC4_encrypt(encryption_key, bytearr) - obj = ByteStringObject(bytearr) - obj.writeToStream(stream, None) - else: - stream.write(b_("(")) - for c in bytearr: - if not chr_(c).isalnum() and c != b_(' '): - stream.write(b_("\\%03o" % ord_(c))) - else: - stream.write(b_(chr_(c))) - stream.write(b_(")")) - - -class NameObject(str, PdfObject): - delimiterPattern = re.compile(b_(r"\s+|[\(\)<>\[\]{}/%]")) - surfix = b_("/") - - def writeToStream(self, stream, encryption_key): - stream.write(b_(self)) - - def readFromStream(stream, pdf): - debug = False - if debug: print((stream.tell())) - name = stream.read(1) - if name != NameObject.surfix: - raise utils.PdfReadError("name read error") - name += utils.readUntilRegex(stream, NameObject.delimiterPattern, - ignore_eof=True) - if debug: print(name) - try: - return NameObject(name.decode('utf-8')) - except (UnicodeEncodeError, UnicodeDecodeError) as e: - # Name objects should represent irregular characters - # with a '#' followed by the symbol's hex number - if not pdf.strict: - warnings.warn("Illegal character in Name Object", utils.PdfReadWarning) - return NameObject(name) - else: - raise utils.PdfReadError("Illegal character in Name Object") - - readFromStream = staticmethod(readFromStream) - - -class DictionaryObject(dict, PdfObject): - def raw_get(self, key): - return dict.__getitem__(self, key) - - def __setitem__(self, key, value): - if not isinstance(key, PdfObject): - raise ValueError("key must be PdfObject") - if not isinstance(value, PdfObject): - raise ValueError("value must be PdfObject") - return dict.__setitem__(self, key, value) - - def setdefault(self, key, value=None): - if not isinstance(key, PdfObject): - raise ValueError("key must be PdfObject") - if not isinstance(value, PdfObject): - raise ValueError("value must be PdfObject") - return dict.setdefault(self, key, value) - - def __getitem__(self, key): - return dict.__getitem__(self, key).getObject() - - ## - # Retrieves XMP (Extensible Metadata Platform) data relevant to the - # this object, if available. - #
- # Stability: Added in v1.12, will exist for all future v1.x releases. - # @return Returns a {@link #xmp.XmpInformation XmlInformation} instance - # that can be used to access XMP metadata from the document. Can also - # return None if no metadata was found on the document root. - def getXmpMetadata(self): - metadata = self.get("/Metadata", None) - if metadata == None: - return None - metadata = metadata.getObject() - from . import xmp - if not isinstance(metadata, xmp.XmpInformation): - metadata = xmp.XmpInformation(metadata) - self[NameObject("/Metadata")] = metadata - return metadata - - ## - # Read-only property that accesses the {@link - # #DictionaryObject.getXmpData getXmpData} function. - #
- # Stability: Added in v1.12, will exist for all future v1.x releases. - xmpMetadata = property(lambda self: self.getXmpMetadata(), None, None) - - def writeToStream(self, stream, encryption_key): - stream.write(b_("<<\n")) - for key, value in list(self.items()): - key.writeToStream(stream, encryption_key) - stream.write(b_(" ")) - value.writeToStream(stream, encryption_key) - stream.write(b_("\n")) - stream.write(b_(">>")) - - def readFromStream(stream, pdf): - debug = False - tmp = stream.read(2) - if tmp != b_("<<"): - raise utils.PdfReadError("Dictionary read error at byte %s: stream must begin with '<<'" % utils.hexStr(stream.tell())) - data = {} - while True: - tok = readNonWhitespace(stream) - if tok == b_('\x00'): - continue - elif tok == b_('%'): - stream.seek(-1, 1) - skipOverComment(stream) - continue - if not tok: - # stream has truncated prematurely - raise PdfStreamError("Stream has ended unexpectedly") - - if debug: print(("Tok:", tok)) - if tok == b_(">"): - stream.read(1) - break - stream.seek(-1, 1) - key = readObject(stream, pdf) - tok = readNonWhitespace(stream) - stream.seek(-1, 1) - value = readObject(stream, pdf) - if not data.get(key): - data[key] = value - elif pdf.strict: - # multiple definitions of key not permitted - raise utils.PdfReadError("Multiple definitions in dictionary at byte %s for key %s" \ - % (utils.hexStr(stream.tell()), key)) - else: - warnings.warn("Multiple definitions in dictionary at byte %s for key %s" \ - % (utils.hexStr(stream.tell()), key), utils.PdfReadWarning) - - pos = stream.tell() - s = readNonWhitespace(stream) - if s == b_('s') and stream.read(5) == b_('tream'): - eol = stream.read(1) - # odd PDF file output has spaces after 'stream' keyword but before EOL. - # patch provided by Danial Sandler - while eol == b_(' '): - eol = stream.read(1) - assert eol in (b_("\n"), b_("\r")) - if eol == b_("\r"): - # read \n after - if stream.read(1) != b_('\n'): - stream.seek(-1, 1) - # this is a stream object, not a dictionary - assert "/Length" in data - length = data["/Length"] - if debug: print(data) - if isinstance(length, IndirectObject): - t = stream.tell() - length = pdf.getObject(length) - stream.seek(t, 0) - data["__streamdata__"] = stream.read(length) - if debug: print("here") - #if debug: print(binascii.hexlify(data["__streamdata__"])) - e = readNonWhitespace(stream) - ndstream = stream.read(8) - if (e + ndstream) != b_("endstream"): - # (sigh) - the odd PDF file has a length that is too long, so - # we need to read backwards to find the "endstream" ending. - # ReportLab (unknown version) generates files with this bug, - # and Python users into PDF files tend to be our audience. - # we need to do this to correct the streamdata and chop off - # an extra character. - pos = stream.tell() - stream.seek(-10, 1) - end = stream.read(9) - if end == b_("endstream"): - # we found it by looking back one character further. - data["__streamdata__"] = data["__streamdata__"][:-1] - else: - if debug: print(("E", e, ndstream, debugging.toHex(end))) - stream.seek(pos, 0) - raise utils.PdfReadError("Unable to find 'endstream' marker after stream at byte %s." 
% utils.hexStr(stream.tell())) - else: - stream.seek(pos, 0) - if "__streamdata__" in data: - return StreamObject.initializeFromDictionary(data) - else: - retval = DictionaryObject() - retval.update(data) - return retval - readFromStream = staticmethod(readFromStream) - - -class TreeObject(DictionaryObject): - def __init__(self): - DictionaryObject.__init__(self) - - def hasChildren(self): - return '/First' in self - - def __iter__(self): - return self.children() - - def children(self): - if not self.hasChildren(): - raise StopIteration - - child = self['/First'] - while True: - yield child - if child == self['/Last']: - raise StopIteration - child = child['/Next'] - - def addChild(self, child, pdf): - childObj = child.getObject() - child = pdf.getReference(childObj) - assert isinstance(child, IndirectObject) - - if '/First' not in self: - self[NameObject('/First')] = child - self[NameObject('/Count')] = NumberObject(0) - prev = None - else: - prev = self['/Last'] - - self[NameObject('/Last')] = child - self[NameObject('/Count')] = NumberObject(self[NameObject('/Count')] + 1) - - if prev: - prevRef = pdf.getReference(prev) - assert isinstance(prevRef, IndirectObject) - childObj[NameObject('/Prev')] = prevRef - prev[NameObject('/Next')] = child - - parentRef = pdf.getReference(self) - assert isinstance(parentRef, IndirectObject) - childObj[NameObject('/Parent')] = parentRef - - def removeChild(self, child): - childObj = child.getObject() - - if NameObject('/Parent') not in childObj: - raise ValueError("Removed child does not appear to be a tree item") - elif childObj[NameObject('/Parent')] != self: - raise ValueError("Removed child is not a member of this tree") - - found = False - prevRef = None - prev = None - curRef = self[NameObject('/First')] - cur = curRef.getObject() - lastRef = self[NameObject('/Last')] - last = lastRef.getObject() - while cur != None: - if cur == childObj: - if prev == None: - if NameObject('/Next') in cur: - # Removing first tree node - nextRef = cur[NameObject('/Next')] - next = nextRef.getObject() - del next[NameObject('/Prev')] - self[NameObject('/First')] = nextRef - self[NameObject('/Count')] = self[NameObject('/Count')] - 1 - - else: - # Removing only tree node - assert self[NameObject('/Count')] == 1 - del self[NameObject('/Count')] - del self[NameObject('/First')] - if NameObject('/Last') in self: - del self[NameObject('/Last')] - else: - if NameObject('/Next') in cur: - # Removing middle tree node - nextRef = cur[NameObject('/Next')] - next = nextRef.getObject() - next[NameObject('/Prev')] = prevRef - prev[NameObject('/Next')] = nextRef - self[NameObject('/Count')] = self[NameObject('/Count')] - 1 - else: - # Removing last tree node - assert cur == last - del prev[NameObject('/Next')] - self[NameObject('/Last')] = prevRef - self[NameObject('/Count')] = self[NameObject('/Count')] - 1 - found = True - break - - prevRef = curRef - prev = cur - if NameObject('/Next') in cur: - curRef = cur[NameObject('/Next')] - cur = curRef.getObject() - else: - curRef = None - cur = None - - if not found: - raise ValueError("Removal couldn't find item in tree") - - del childObj[NameObject('/Parent')] - if NameObject('/Next') in childObj: - del childObj[NameObject('/Next')] - if NameObject('/Prev') in childObj: - del childObj[NameObject('/Prev')] - - def emptyTree(self): - for child in self: - childObj = child.getObject() - del childObj[NameObject('/Parent')] - if NameObject('/Next') in childObj: - del childObj[NameObject('/Next')] - if NameObject('/Prev') in childObj: - del 
childObj[NameObject('/Prev')] - - if NameObject('/Count') in self: - del self[NameObject('/Count')] - if NameObject('/First') in self: - del self[NameObject('/First')] - if NameObject('/Last') in self: - del self[NameObject('/Last')] - - -class StreamObject(DictionaryObject): - def __init__(self): - self._data = None - self.decodedSelf = None - - def writeToStream(self, stream, encryption_key): - self[NameObject("/Length")] = NumberObject(len(self._data)) - DictionaryObject.writeToStream(self, stream, encryption_key) - del self["/Length"] - stream.write(b_("\nstream\n")) - data = self._data - if encryption_key: - data = RC4_encrypt(encryption_key, data) - stream.write(data) - stream.write(b_("\nendstream")) - - def initializeFromDictionary(data): - if "/Filter" in data: - retval = EncodedStreamObject() - else: - retval = DecodedStreamObject() - retval._data = data["__streamdata__"] - del data["__streamdata__"] - del data["/Length"] - retval.update(data) - return retval - initializeFromDictionary = staticmethod(initializeFromDictionary) - - def flateEncode(self): - if "/Filter" in self: - f = self["/Filter"] - if isinstance(f, ArrayObject): - f.insert(0, NameObject("/FlateDecode")) - else: - newf = ArrayObject() - newf.append(NameObject("/FlateDecode")) - newf.append(f) - f = newf - else: - f = NameObject("/FlateDecode") - retval = EncodedStreamObject() - retval[NameObject("/Filter")] = f - retval._data = filters.FlateDecode.encode(self._data) - return retval - - -class DecodedStreamObject(StreamObject): - def getData(self): - return self._data - - def setData(self, data): - self._data = data - - -class EncodedStreamObject(StreamObject): - def __init__(self): - self.decodedSelf = None - - def getData(self): - if self.decodedSelf: - # cached version of decoded object - return self.decodedSelf.getData() - else: - # create decoded object - decoded = DecodedStreamObject() - - decoded._data = filters.decodeStreamData(self) - for key, value in list(self.items()): - if not key in ("/Length", "/Filter", "/DecodeParms"): - decoded[key] = value - self.decodedSelf = decoded - return decoded._data - - def setData(self, data): - raise utils.PdfReadError("Creating EncodedStreamObject is not currently supported") - - -class RectangleObject(ArrayObject): - """ - This class is used to represent *page boxes* in PyPDF2. 
These boxes include: - - * :attr:`artBox ` - * :attr:`bleedBox ` - * :attr:`cropBox ` - * :attr:`mediaBox ` - * :attr:`trimBox ` - """ - def __init__(self, arr): - # must have four points - assert len(arr) == 4 - # automatically convert arr[x] into NumberObject(arr[x]) if necessary - ArrayObject.__init__(self, [self.ensureIsNumber(x) for x in arr]) - - def ensureIsNumber(self, value): - if not isinstance(value, (NumberObject, FloatObject)): - value = FloatObject(value) - return value - - def __repr__(self): - return "RectangleObject(%s)" % repr(list(self)) - - def getLowerLeft_x(self): - return self[0] - - def getLowerLeft_y(self): - return self[1] - - def getUpperRight_x(self): - return self[2] - - def getUpperRight_y(self): - return self[3] - - def getUpperLeft_x(self): - return self.getLowerLeft_x() - - def getUpperLeft_y(self): - return self.getUpperRight_y() - - def getLowerRight_x(self): - return self.getUpperRight_x() - - def getLowerRight_y(self): - return self.getLowerLeft_y() - - def getLowerLeft(self): - return self.getLowerLeft_x(), self.getLowerLeft_y() - - def getLowerRight(self): - return self.getLowerRight_x(), self.getLowerRight_y() - - def getUpperLeft(self): - return self.getUpperLeft_x(), self.getUpperLeft_y() - - def getUpperRight(self): - return self.getUpperRight_x(), self.getUpperRight_y() - - def setLowerLeft(self, value): - self[0], self[1] = [self.ensureIsNumber(x) for x in value] - - def setLowerRight(self, value): - self[2], self[1] = [self.ensureIsNumber(x) for x in value] - - def setUpperLeft(self, value): - self[0], self[3] = [self.ensureIsNumber(x) for x in value] - - def setUpperRight(self, value): - self[2], self[3] = [self.ensureIsNumber(x) for x in value] - - def getWidth(self): - return self.getUpperRight_x() - self.getLowerLeft_x() - - def getHeight(self): - return self.getUpperRight_y() - self.getLowerLeft_y() - - lowerLeft = property(getLowerLeft, setLowerLeft, None, None) - """ - Property to read and modify the lower left coordinate of this box - in (x,y) form. - """ - lowerRight = property(getLowerRight, setLowerRight, None, None) - """ - Property to read and modify the lower right coordinate of this box - in (x,y) form. - """ - upperLeft = property(getUpperLeft, setUpperLeft, None, None) - """ - Property to read and modify the upper left coordinate of this box - in (x,y) form. - """ - upperRight = property(getUpperRight, setUpperRight, None, None) - """ - Property to read and modify the upper right coordinate of this box - in (x,y) form. - """ - - -class Field(TreeObject): - """ - A class representing a field dictionary. This class is accessed through - :meth:`getFields()` - """ - def __init__(self, data): - DictionaryObject.__init__(self) - attributes = ("/FT", "/Parent", "/Kids", "/T", "/TU", "/TM", "/Ff", - "/V", "/DV", "/AA") - for attr in attributes: - try: - self[NameObject(attr)] = data[attr] - except KeyError: - pass - - fieldType = property(lambda self: self.get("/FT")) - """ - Read-only property accessing the type of this field. - """ - - parent = property(lambda self: self.get("/Parent")) - """ - Read-only property accessing the parent of this field. - """ - - kids = property(lambda self: self.get("/Kids")) - """ - Read-only property accessing the kids of this field. - """ - - name = property(lambda self: self.get("/T")) - """ - Read-only property accessing the name of this field. - """ - - altName = property(lambda self: self.get("/TU")) - """ - Read-only property accessing the alternate name of this field. 
- """ - - mappingName = property(lambda self: self.get("/TM")) - """ - Read-only property accessing the mapping name of this field. This - name is used by PyPDF2 as a key in the dictionary returned by - :meth:`getFields()` - """ - - flags = property(lambda self: self.get("/Ff")) - """ - Read-only property accessing the field flags, specifying various - characteristics of the field (see Table 8.70 of the PDF 1.7 reference). - """ - - value = property(lambda self: self.get("/V")) - """ - Read-only property accessing the value of this field. Format - varies based on field type. - """ - - defaultValue = property(lambda self: self.get("/DV")) - """ - Read-only property accessing the default value of this field. - """ - - additionalActions = property(lambda self: self.get("/AA")) - """ - Read-only property accessing the additional actions dictionary. - This dictionary defines the field's behavior in response to trigger events. - See Section 8.5.2 of the PDF 1.7 reference. - """ - - -class Destination(TreeObject): - """ - A class representing a destination within a PDF file. - See section 8.2.1 of the PDF 1.6 reference. - - :param str title: Title of this destination. - :param int page: Page number of this destination. - :param str typ: How the destination is displayed. - :param args: Additional arguments may be necessary depending on the type. - :raises PdfReadError: If destination type is invalid. - - Valid ``typ`` arguments (see PDF spec for details): - /Fit No additional arguments - /XYZ [left] [top] [zoomFactor] - /FitH [top] - /FitV [left] - /FitR [left] [bottom] [right] [top] - /FitB No additional arguments - /FitBH [top] - /FitBV [left] - """ - def __init__(self, title, page, typ, *args): - DictionaryObject.__init__(self) - self[NameObject("/Title")] = title - self[NameObject("/Page")] = page - self[NameObject("/Type")] = typ - - # from table 8.2 of the PDF 1.7 reference. - if typ == "/XYZ": - (self[NameObject("/Left")], self[NameObject("/Top")], - self[NameObject("/Zoom")]) = args - elif typ == "/FitR": - (self[NameObject("/Left")], self[NameObject("/Bottom")], - self[NameObject("/Right")], self[NameObject("/Top")]) = args - elif typ in ["/FitH", "/FitBH"]: - self[NameObject("/Top")], = args - elif typ in ["/FitV", "/FitBV"]: - self[NameObject("/Left")], = args - elif typ in ["/Fit", "/FitB"]: - pass - else: - raise utils.PdfReadError("Unknown Destination Type: %r" % typ) - - def getDestArray(self): - return ArrayObject([self.raw_get('/Page'), self['/Type']] + [self[x] for x in ['/Left', '/Bottom', '/Right', '/Top', '/Zoom'] if x in self]) - - def writeToStream(self, stream, encryption_key): - stream.write(b_("<<\n")) - key = NameObject('/D') - key.writeToStream(stream, encryption_key) - stream.write(b_(" ")) - value = self.getDestArray() - value.writeToStream(stream, encryption_key) - - key = NameObject("/S") - key.writeToStream(stream, encryption_key) - stream.write(b_(" ")) - value = NameObject("/GoTo") - value.writeToStream(stream, encryption_key) - - stream.write(b_("\n")) - stream.write(b_(">>")) - - title = property(lambda self: self.get("/Title")) - """ - Read-only property accessing the destination title. - - :rtype: str - """ - - page = property(lambda self: self.get("/Page")) - """ - Read-only property accessing the destination page number. - - :rtype: int - """ - - typ = property(lambda self: self.get("/Type")) - """ - Read-only property accessing the destination type. 
- - :rtype: str - """ - - zoom = property(lambda self: self.get("/Zoom", None)) - """ - Read-only property accessing the zoom factor. - - :rtype: int, or ``None`` if not available. - """ - - left = property(lambda self: self.get("/Left", None)) - """ - Read-only property accessing the left horizontal coordinate. - - :rtype: int, or ``None`` if not available. - """ - - right = property(lambda self: self.get("/Right", None)) - """ - Read-only property accessing the right horizontal coordinate. - - :rtype: int, or ``None`` if not available. - """ - - top = property(lambda self: self.get("/Top", None)) - """ - Read-only property accessing the top vertical coordinate. - - :rtype: int, or ``None`` if not available. - """ - - bottom = property(lambda self: self.get("/Bottom", None)) - """ - Read-only property accessing the bottom vertical coordinate. - - :rtype: int, or ``None`` if not available. - """ - - -class Bookmark(Destination): - def writeToStream(self, stream, encryption_key): - stream.write(b_("<<\n")) - for key in [NameObject(x) for x in ['/Title', '/Parent', '/First', '/Last', '/Next', '/Prev'] if x in self]: - key.writeToStream(stream, encryption_key) - stream.write(b_(" ")) - value = self.raw_get(key) - value.writeToStream(stream, encryption_key) - stream.write(b_("\n")) - key = NameObject('/Dest') - key.writeToStream(stream, encryption_key) - stream.write(b_(" ")) - value = self.getDestArray() - value.writeToStream(stream, encryption_key) - stream.write(b_("\n")) - stream.write(b_(">>")) - - -def encode_pdfdocencoding(unicode_string): - retval = b_('') - for c in unicode_string: - try: - retval += b_(chr(_pdfDocEncoding_rev[c])) - except KeyError: - raise UnicodeEncodeError("pdfdocencoding", c, -1, -1, - "does not exist in translation table") - return retval - - -def decode_pdfdocencoding(byte_array): - retval = u_('') - for b in byte_array: - c = _pdfDocEncoding[ord_(b)] - if c == u_('\u0000'): - raise UnicodeDecodeError("pdfdocencoding", utils.barray(b), -1, -1, - "does not exist in translation table") - retval += c - return retval - -_pdfDocEncoding = ( - u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), - u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), - u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), - u_('\u02d8'), u_('\u02c7'), u_('\u02c6'), u_('\u02d9'), u_('\u02dd'), u_('\u02db'), u_('\u02da'), u_('\u02dc'), - u_('\u0020'), u_('\u0021'), u_('\u0022'), u_('\u0023'), u_('\u0024'), u_('\u0025'), u_('\u0026'), u_('\u0027'), - u_('\u0028'), u_('\u0029'), u_('\u002a'), u_('\u002b'), u_('\u002c'), u_('\u002d'), u_('\u002e'), u_('\u002f'), - u_('\u0030'), u_('\u0031'), u_('\u0032'), u_('\u0033'), u_('\u0034'), u_('\u0035'), u_('\u0036'), u_('\u0037'), - u_('\u0038'), u_('\u0039'), u_('\u003a'), u_('\u003b'), u_('\u003c'), u_('\u003d'), u_('\u003e'), u_('\u003f'), - u_('\u0040'), u_('\u0041'), u_('\u0042'), u_('\u0043'), u_('\u0044'), u_('\u0045'), u_('\u0046'), u_('\u0047'), - u_('\u0048'), u_('\u0049'), u_('\u004a'), u_('\u004b'), u_('\u004c'), u_('\u004d'), u_('\u004e'), u_('\u004f'), - u_('\u0050'), u_('\u0051'), u_('\u0052'), u_('\u0053'), u_('\u0054'), u_('\u0055'), u_('\u0056'), u_('\u0057'), - u_('\u0058'), u_('\u0059'), u_('\u005a'), u_('\u005b'), u_('\u005c'), u_('\u005d'), u_('\u005e'), u_('\u005f'), - u_('\u0060'), u_('\u0061'), u_('\u0062'), u_('\u0063'), u_('\u0064'), u_('\u0065'), 
u_('\u0066'), u_('\u0067'), - u_('\u0068'), u_('\u0069'), u_('\u006a'), u_('\u006b'), u_('\u006c'), u_('\u006d'), u_('\u006e'), u_('\u006f'), - u_('\u0070'), u_('\u0071'), u_('\u0072'), u_('\u0073'), u_('\u0074'), u_('\u0075'), u_('\u0076'), u_('\u0077'), - u_('\u0078'), u_('\u0079'), u_('\u007a'), u_('\u007b'), u_('\u007c'), u_('\u007d'), u_('\u007e'), u_('\u0000'), - u_('\u2022'), u_('\u2020'), u_('\u2021'), u_('\u2026'), u_('\u2014'), u_('\u2013'), u_('\u0192'), u_('\u2044'), - u_('\u2039'), u_('\u203a'), u_('\u2212'), u_('\u2030'), u_('\u201e'), u_('\u201c'), u_('\u201d'), u_('\u2018'), - u_('\u2019'), u_('\u201a'), u_('\u2122'), u_('\ufb01'), u_('\ufb02'), u_('\u0141'), u_('\u0152'), u_('\u0160'), - u_('\u0178'), u_('\u017d'), u_('\u0131'), u_('\u0142'), u_('\u0153'), u_('\u0161'), u_('\u017e'), u_('\u0000'), - u_('\u20ac'), u_('\u00a1'), u_('\u00a2'), u_('\u00a3'), u_('\u00a4'), u_('\u00a5'), u_('\u00a6'), u_('\u00a7'), - u_('\u00a8'), u_('\u00a9'), u_('\u00aa'), u_('\u00ab'), u_('\u00ac'), u_('\u0000'), u_('\u00ae'), u_('\u00af'), - u_('\u00b0'), u_('\u00b1'), u_('\u00b2'), u_('\u00b3'), u_('\u00b4'), u_('\u00b5'), u_('\u00b6'), u_('\u00b7'), - u_('\u00b8'), u_('\u00b9'), u_('\u00ba'), u_('\u00bb'), u_('\u00bc'), u_('\u00bd'), u_('\u00be'), u_('\u00bf'), - u_('\u00c0'), u_('\u00c1'), u_('\u00c2'), u_('\u00c3'), u_('\u00c4'), u_('\u00c5'), u_('\u00c6'), u_('\u00c7'), - u_('\u00c8'), u_('\u00c9'), u_('\u00ca'), u_('\u00cb'), u_('\u00cc'), u_('\u00cd'), u_('\u00ce'), u_('\u00cf'), - u_('\u00d0'), u_('\u00d1'), u_('\u00d2'), u_('\u00d3'), u_('\u00d4'), u_('\u00d5'), u_('\u00d6'), u_('\u00d7'), - u_('\u00d8'), u_('\u00d9'), u_('\u00da'), u_('\u00db'), u_('\u00dc'), u_('\u00dd'), u_('\u00de'), u_('\u00df'), - u_('\u00e0'), u_('\u00e1'), u_('\u00e2'), u_('\u00e3'), u_('\u00e4'), u_('\u00e5'), u_('\u00e6'), u_('\u00e7'), - u_('\u00e8'), u_('\u00e9'), u_('\u00ea'), u_('\u00eb'), u_('\u00ec'), u_('\u00ed'), u_('\u00ee'), u_('\u00ef'), - u_('\u00f0'), u_('\u00f1'), u_('\u00f2'), u_('\u00f3'), u_('\u00f4'), u_('\u00f5'), u_('\u00f6'), u_('\u00f7'), - u_('\u00f8'), u_('\u00f9'), u_('\u00fa'), u_('\u00fb'), u_('\u00fc'), u_('\u00fd'), u_('\u00fe'), u_('\u00ff') -) - -assert len(_pdfDocEncoding) == 256 - -_pdfDocEncoding_rev = {} -for i in range(256): - char = _pdfDocEncoding[i] - if char == u_("\u0000"): - continue - assert char not in _pdfDocEncoding_rev - _pdfDocEncoding_rev[char] = i diff --git a/vendor/PyPDF2/merger.py b/vendor/PyPDF2/merger.py deleted file mode 100755 index 27702add..00000000 --- a/vendor/PyPDF2/merger.py +++ /dev/null @@ -1,553 +0,0 @@ -# vim: sw=4:expandtab:foldmethod=marker -# -# Copyright (c) 2006, Mathieu Fenniak -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright notice, -# this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# * The name of the author may not be used to endorse or promote products -# derived from this software without specific prior written permission. 
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. - -from .generic import * -from .utils import isString, str_ -from .pdf import PdfFileReader, PdfFileWriter -from .pagerange import PageRange -from sys import version_info -if version_info < ( 3, 0 ): - from cStringIO import StringIO - StreamIO = StringIO -else: - from io import BytesIO - from io import FileIO as file - StreamIO = BytesIO - - -class _MergedPage(object): - """ - _MergedPage is used internally by PdfFileMerger to collect necessary - information on each page that is being merged. - """ - def __init__(self, pagedata, src, id): - self.src = src - self.pagedata = pagedata - self.out_pagedata = None - self.id = id - - -class PdfFileMerger(object): - """ - Initializes a PdfFileMerger object. PdfFileMerger merges multiple PDFs - into a single PDF. It can concatenate, slice, insert, or any combination - of the above. - - See the functions :meth:`merge()` (or :meth:`append()`) - and :meth:`write()` for usage information. - - :param bool strict: Determines whether user should be warned of all - problems and also causes some correctable problems to be fatal. - Defaults to ``True``. - """ - - def __init__(self, strict=True): - self.inputs = [] - self.pages = [] - self.output = PdfFileWriter() - self.bookmarks = [] - self.named_dests = [] - self.id_count = 0 - self.strict = strict - - def merge(self, position, fileobj, bookmark=None, pages=None, import_bookmarks=True): - """ - Merges the pages from the given file into the output file at the - specified page number. - - :param int position: The *page number* to insert this file. File will - be inserted after the given number. - - :param fileobj: A File Object or an object that supports the standard read - and seek methods similar to a File Object. Could also be a - string representing a path to a PDF file. - - :param str bookmark: Optionally, you may specify a bookmark to be applied at - the beginning of the included file by supplying the text of the bookmark. - - :param pages: can be a :ref:`Page Range ` or a ``(start, stop[, step])`` tuple - to merge only the specified range of pages from the source - document into the output document. - - :param bool import_bookmarks: You may prevent the source document's bookmarks - from being imported by specifying this as ``False``. - """ - - # This parameter is passed to self.inputs.append and means - # that the stream used was created in this method. - my_file = False - - # If the fileobj parameter is a string, assume it is a path - # and create a file object at that location. If it is a file, - # copy the file's contents into a BytesIO (or StreamIO) stream object; if - # it is a PdfFileReader, copy that reader's stream into a - # BytesIO (or StreamIO) stream. 
- # If fileobj is none of the above types, it is not modified - decryption_key = None - if isString(fileobj): - fileobj = file(fileobj, 'rb') - my_file = True - elif isinstance(fileobj, file): - fileobj.seek(0) - filecontent = fileobj.read() - fileobj = StreamIO(filecontent) - my_file = True - elif isinstance(fileobj, PdfFileReader): - orig_tell = fileobj.stream.tell() - fileobj.stream.seek(0) - filecontent = StreamIO(fileobj.stream.read()) - fileobj.stream.seek(orig_tell) # reset the stream to its original location - fileobj = filecontent - if hasattr(fileobj, '_decryption_key'): - decryption_key = fileobj._decryption_key - my_file = True - - # Create a new PdfFileReader instance using the stream - # (either file or BytesIO or StringIO) created above - pdfr = PdfFileReader(fileobj, strict=self.strict) - if decryption_key is not None: - pdfr._decryption_key = decryption_key - - # Find the range of pages to merge. - if pages == None: - pages = (0, pdfr.getNumPages()) - elif isinstance(pages, PageRange): - pages = pages.indices(pdfr.getNumPages()) - elif not isinstance(pages, tuple): - raise TypeError('"pages" must be a tuple of (start, stop[, step])') - - srcpages = [] - if bookmark: - bookmark = Bookmark(TextStringObject(bookmark), NumberObject(self.id_count), NameObject('/Fit')) - - outline = [] - if import_bookmarks: - outline = pdfr.getOutlines() - outline = self._trim_outline(pdfr, outline, pages) - - if bookmark: - self.bookmarks += [bookmark, outline] - else: - self.bookmarks += outline - - dests = pdfr.namedDestinations - dests = self._trim_dests(pdfr, dests, pages) - self.named_dests += dests - - # Gather all the pages that are going to be merged - for i in range(*pages): - pg = pdfr.getPage(i) - - id = self.id_count - self.id_count += 1 - - mp = _MergedPage(pg, pdfr, id) - - srcpages.append(mp) - - self._associate_dests_to_pages(srcpages) - self._associate_bookmarks_to_pages(srcpages) - - # Slice to insert the pages at the specified position - self.pages[position:position] = srcpages - - # Keep track of our input files so we can close them later - self.inputs.append((fileobj, pdfr, my_file)) - - def append(self, fileobj, bookmark=None, pages=None, import_bookmarks=True): - """ - Identical to the :meth:`merge()` method, but assumes you want to concatenate - all pages onto the end of the file instead of specifying a position. - - :param fileobj: A File Object or an object that supports the standard read - and seek methods similar to a File Object. Could also be a - string representing a path to a PDF file. - - :param str bookmark: Optionally, you may specify a bookmark to be applied at - the beginning of the included file by supplying the text of the bookmark. - - :param pages: can be a :ref:`Page Range ` or a ``(start, stop[, step])`` tuple - to merge only the specified range of pages from the source - document into the output document. - - :param bool import_bookmarks: You may prevent the source document's bookmarks - from being imported by specifying this as ``False``. - """ - - self.merge(len(self.pages), fileobj, bookmark, pages, import_bookmarks) - - def write(self, fileobj): - """ - Writes all data that has been merged to the given output file. - - :param fileobj: Output file. Can be a filename or any kind of - file-like object. 
- """ - my_file = False - if isString(fileobj): - fileobj = file(fileobj, 'wb') - my_file = True - - # Add pages to the PdfFileWriter - # The commented out line below was replaced with the two lines below it to allow PdfFileMerger to work with PyPdf 1.13 - for page in self.pages: - self.output.addPage(page.pagedata) - page.out_pagedata = self.output.getReference(self.output._pages.getObject()["/Kids"][-1].getObject()) - #idnum = self.output._objects.index(self.output._pages.getObject()["/Kids"][-1].getObject()) + 1 - #page.out_pagedata = IndirectObject(idnum, 0, self.output) - - # Once all pages are added, create bookmarks to point at those pages - self._write_dests() - self._write_bookmarks() - - # Write the output to the file - self.output.write(fileobj) - - if my_file: - fileobj.close() - - def close(self): - """ - Shuts all file descriptors (input and output) and clears all memory - usage. - """ - self.pages = [] - for fo, pdfr, mine in self.inputs: - if mine: - fo.close() - - self.inputs = [] - self.output = None - - def addMetadata(self, infos): - """ - Add custom metadata to the output. - - :param dict infos: a Python dictionary where each key is a field - and each value is your new metadata. - Example: ``{u'/Title': u'My title'}`` - """ - self.output.addMetadata(infos) - - def setPageLayout(self, layout): - """ - Set the page layout - - :param str layout: The page layout to be used - - Valid layouts are: - /NoLayout Layout explicitly not specified - /SinglePage Show one page at a time - /OneColumn Show one column at a time - /TwoColumnLeft Show pages in two columns, odd-numbered pages on the left - /TwoColumnRight Show pages in two columns, odd-numbered pages on the right - /TwoPageLeft Show two pages at a time, odd-numbered pages on the left - /TwoPageRight Show two pages at a time, odd-numbered pages on the right - """ - self.output.setPageLayout(layout) - - def setPageMode(self, mode): - """ - Set the page mode. - - :param str mode: The page mode to use. - - Valid modes are: - /UseNone Do not show outlines or thumbnails panels - /UseOutlines Show outlines (aka bookmarks) panel - /UseThumbs Show page thumbnails panel - /FullScreen Fullscreen view - /UseOC Show Optional Content Group (OCG) panel - /UseAttachments Show attachments panel - """ - self.output.setPageMode(mode) - - def _trim_dests(self, pdf, dests, pages): - """ - Removes any named destinations that are not a part of the specified - page set. - """ - new_dests = [] - prev_header_added = True - for k, o in list(dests.items()): - for j in range(*pages): - if pdf.getPage(j).getObject() == o['/Page'].getObject(): - o[NameObject('/Page')] = o['/Page'].getObject() - assert str_(k) == str_(o['/Title']) - new_dests.append(o) - break - return new_dests - - def _trim_outline(self, pdf, outline, pages): - """ - Removes any outline/bookmark entries that are not a part of the - specified page set. 
- """ - new_outline = [] - prev_header_added = True - for i, o in enumerate(outline): - if isinstance(o, list): - sub = self._trim_outline(pdf, o, pages) - if sub: - if not prev_header_added: - new_outline.append(outline[i-1]) - new_outline.append(sub) - else: - prev_header_added = False - for j in range(*pages): - if pdf.getPage(j).getObject() == o['/Page'].getObject(): - o[NameObject('/Page')] = o['/Page'].getObject() - new_outline.append(o) - prev_header_added = True - break - return new_outline - - def _write_dests(self): - dests = self.named_dests - - for v in dests: - pageno = None - pdf = None - if '/Page' in v: - for i, p in enumerate(self.pages): - if p.id == v['/Page']: - v[NameObject('/Page')] = p.out_pagedata - pageno = i - pdf = p.src - break - if pageno != None: - self.output.addNamedDestinationObject(v) - - def _write_bookmarks(self, bookmarks=None, parent=None): - - if bookmarks == None: - bookmarks = self.bookmarks - - last_added = None - for b in bookmarks: - if isinstance(b, list): - self._write_bookmarks(b, last_added) - continue - - pageno = None - pdf = None - if '/Page' in b: - for i, p in enumerate(self.pages): - if p.id == b['/Page']: - #b[NameObject('/Page')] = p.out_pagedata - args = [NumberObject(p.id), NameObject(b['/Type'])] - #nothing more to add - #if b['/Type'] == '/Fit' or b['/Type'] == '/FitB' - if b['/Type'] == '/FitH' or b['/Type'] == '/FitBH': - if '/Top' in b and not isinstance(b['/Top'], NullObject): - args.append(FloatObject(b['/Top'])) - else: - args.append(FloatObject(0)) - del b['/Top'] - elif b['/Type'] == '/FitV' or b['/Type'] == '/FitBV': - if '/Left' in b and not isinstance(b['/Left'], NullObject): - args.append(FloatObject(b['/Left'])) - else: - args.append(FloatObject(0)) - del b['/Left'] - elif b['/Type'] == '/XYZ': - if '/Left' in b and not isinstance(b['/Left'], NullObject): - args.append(FloatObject(b['/Left'])) - else: - args.append(FloatObject(0)) - if '/Top' in b and not isinstance(b['/Top'], NullObject): - args.append(FloatObject(b['/Top'])) - else: - args.append(FloatObject(0)) - if '/Zoom' in b and not isinstance(b['/Zoom'], NullObject): - args.append(FloatObject(b['/Zoom'])) - else: - args.append(FloatObject(0)) - del b['/Top'], b['/Zoom'], b['/Left'] - elif b['/Type'] == '/FitR': - if '/Left' in b and not isinstance(b['/Left'], NullObject): - args.append(FloatObject(b['/Left'])) - else: - args.append(FloatObject(0)) - if '/Bottom' in b and not isinstance(b['/Bottom'], NullObject): - args.append(FloatObject(b['/Bottom'])) - else: - args.append(FloatObject(0)) - if '/Right' in b and not isinstance(b['/Right'], NullObject): - args.append(FloatObject(b['/Right'])) - else: - args.append(FloatObject(0)) - if '/Top' in b and not isinstance(b['/Top'], NullObject): - args.append(FloatObject(b['/Top'])) - else: - args.append(FloatObject(0)) - del b['/Left'], b['/Right'], b['/Bottom'], b['/Top'] - - b[NameObject('/A')] = DictionaryObject({NameObject('/S'): NameObject('/GoTo'), NameObject('/D'): ArrayObject(args)}) - - pageno = i - pdf = p.src - break - if pageno != None: - del b['/Page'], b['/Type'] - last_added = self.output.addBookmarkDict(b, parent) - - def _associate_dests_to_pages(self, pages): - for nd in self.named_dests: - pageno = None - np = nd['/Page'] - - if isinstance(np, NumberObject): - continue - - for p in pages: - if np.getObject() == p.pagedata.getObject(): - pageno = p.id - - if pageno != None: - nd[NameObject('/Page')] = NumberObject(pageno) - else: - raise ValueError("Unresolved named destination '%s'" % 
(nd['/Title'],)) - - def _associate_bookmarks_to_pages(self, pages, bookmarks=None): - if bookmarks == None: - bookmarks = self.bookmarks - - for b in bookmarks: - if isinstance(b, list): - self._associate_bookmarks_to_pages(pages, b) - continue - - pageno = None - bp = b['/Page'] - - if isinstance(bp, NumberObject): - continue - - for p in pages: - if bp.getObject() == p.pagedata.getObject(): - pageno = p.id - - if pageno != None: - b[NameObject('/Page')] = NumberObject(pageno) - else: - raise ValueError("Unresolved bookmark '%s'" % (b['/Title'],)) - - def findBookmark(self, bookmark, root=None): - if root == None: - root = self.bookmarks - - for i, b in enumerate(root): - if isinstance(b, list): - res = self.findBookmark(bookmark, b) - if res: - return [i] + res - elif b == bookmark or b['/Title'] == bookmark: - return [i] - - return None - - def addBookmark(self, title, pagenum, parent=None): - """ - Add a bookmark to this PDF file. - - :param str title: Title to use for this bookmark. - :param int pagenum: Page number this bookmark will point to. - :param parent: A reference to a parent bookmark to create nested - bookmarks. - """ - if parent == None: - iloc = [len(self.bookmarks)-1] - elif isinstance(parent, list): - iloc = parent - else: - iloc = self.findBookmark(parent) - - dest = Bookmark(TextStringObject(title), NumberObject(pagenum), NameObject('/FitH'), NumberObject(826)) - - if parent == None: - self.bookmarks.append(dest) - else: - bmparent = self.bookmarks - for i in iloc[:-1]: - bmparent = bmparent[i] - npos = iloc[-1]+1 - if npos < len(bmparent) and isinstance(bmparent[npos], list): - bmparent[npos].append(dest) - else: - bmparent.insert(npos, [dest]) - return dest - - def addNamedDestination(self, title, pagenum): - """ - Add a destination to the output. - - :param str title: Title to use - :param int pagenum: Page number this destination points at. - """ - - dest = Destination(TextStringObject(title), NumberObject(pagenum), NameObject('/FitH'), NumberObject(826)) - self.named_dests.append(dest) - - -class OutlinesObject(list): - def __init__(self, pdf, tree, parent=None): - list.__init__(self) - self.tree = tree - self.pdf = pdf - self.parent = parent - - def remove(self, index): - obj = self[index] - del self[index] - self.tree.removeChild(obj) - - def add(self, title, pagenum): - pageRef = self.pdf.getObject(self.pdf._pages)['/Kids'][pagenum] - action = DictionaryObject() - action.update({ - NameObject('/D') : ArrayObject([pageRef, NameObject('/FitH'), NumberObject(826)]), - NameObject('/S') : NameObject('/GoTo') - }) - actionRef = self.pdf._addObject(action) - bookmark = TreeObject() - - bookmark.update({ - NameObject('/A'): actionRef, - NameObject('/Title'): createStringObject(title), - }) - - self.pdf._addObject(bookmark) - - self.tree.addChild(bookmark) - - def removeAll(self): - for child in [x for x in self.tree.children()]: - self.tree.removeChild(child) - self.pop() diff --git a/vendor/PyPDF2/pagerange.py b/vendor/PyPDF2/pagerange.py deleted file mode 100755 index ce96ec5f..00000000 --- a/vendor/PyPDF2/pagerange.py +++ /dev/null @@ -1,152 +0,0 @@ -#!/usr/bin/env python -""" -Representation and utils for ranges of PDF file pages. - -Copyright (c) 2014, Steve Witham . -All rights reserved. This software is available under a BSD license; -see https://github.com/mstamy2/PyPDF2/blob/master/LICENSE -""" - -import re -from .utils import isString - -_INT_RE = r"(0|-?[1-9]\d*)" # A decimal int, don't allow "-0". 
-PAGE_RANGE_RE = "^({int}|({int}?(:{int}?(:{int}?)?)))$".format(int=_INT_RE) -# groups: 12 34 5 6 7 8 - - -class ParseError(Exception): - pass - - -PAGE_RANGE_HELP = """Remember, page indices start with zero. - Page range expression examples: - : all pages. -1 last page. - 22 just the 23rd page. :-1 all but the last page. - 0:3 the first three pages. -2 second-to-last page. - :3 the first three pages. -2: last two pages. - 5: from the sixth page onward. -3:-1 third & second to last. - The third, "stride" or "step" number is also recognized. - ::2 0 2 4 ... to the end. 3:0:-1 3 2 1 but not 0. - 1:10:2 1 3 5 7 9 2::-1 2 1 0. - ::-1 all pages in reverse order. -""" - - -class PageRange(object): - """ - A slice-like representation of a range of page indices, - i.e. page numbers, only starting at zero. - The syntax is like what you would put between brackets [ ]. - The slice is one of the few Python types that can't be subclassed, - but this class converts to and from slices, and allows similar use. - o PageRange(str) parses a string representing a page range. - o PageRange(slice) directly "imports" a slice. - o to_slice() gives the equivalent slice. - o str() and repr() allow printing. - o indices(n) is like slice.indices(n). - """ - - def __init__(self, arg): - """ - Initialize with either a slice -- giving the equivalent page range, - or a PageRange object -- making a copy, - or a string like - "int", "[int]:[int]" or "[int]:[int]:[int]", - where the brackets indicate optional ints. - {page_range_help} - Note the difference between this notation and arguments to slice(): - slice(3) means the first three pages; - PageRange("3") means the range of only the fourth page. - However PageRange(slice(3)) means the first three pages. - """ - if isinstance(arg, slice): - self._slice = arg - return - - if isinstance(arg, PageRange): - self._slice = arg.to_slice() - return - - m = isString(arg) and re.match(PAGE_RANGE_RE, arg) - if not m: - raise ParseError(arg) - elif m.group(2): - # Special case: just an int means a range of one page. - start = int(m.group(2)) - stop = start + 1 if start != -1 else None - self._slice = slice(start, stop) - else: - self._slice = slice(*[int(g) if g else None - for g in m.group(4, 6, 8)]) - - # Just formatting this when there is __doc__ for __init__ - if __init__.__doc__: - __init__.__doc__ = __init__.__doc__.format(page_range_help=PAGE_RANGE_HELP) - - @staticmethod - def valid(input): - """ True if input is a valid initializer for a PageRange. """ - return isinstance(input, slice) or \ - isinstance(input, PageRange) or \ - (isString(input) - and bool(re.match(PAGE_RANGE_RE, input))) - - def to_slice(self): - """ Return the slice equivalent of this page range. """ - return self._slice - - def __str__(self): - """ A string like "1:2:3". """ - s = self._slice - if s.step == None: - if s.start != None and s.stop == s.start + 1: - return str(s.start) - - indices = s.start, s.stop - else: - indices = s.start, s.stop, s.step - return ':'.join("" if i == None else str(i) for i in indices) - - def __repr__(self): - """ A string like "PageRange('1:2:3')". """ - return "PageRange(" + repr(str(self)) + ")" - - def indices(self, n): - """ - n is the length of the list of pages to choose from. - Returns arguments for range(). See help(slice.indices). - """ - return self._slice.indices(n) - - -PAGE_RANGE_ALL = PageRange(":") # The range of all pages. 
- - -def parse_filename_page_ranges(args): - """ - Given a list of filenames and page ranges, return a list of - (filename, page_range) pairs. - First arg must be a filename; other ags are filenames, page-range - expressions, slice objects, or PageRange objects. - A filename not followed by a page range indicates all pages of the file. - """ - pairs = [] - pdf_filename = None - did_page_range = False - for arg in args + [None]: - if PageRange.valid(arg): - if not pdf_filename: - raise ValueError("The first argument must be a filename, " \ - "not a page range.") - - pairs.append( (pdf_filename, PageRange(arg)) ) - did_page_range = True - else: - # New filename or end of list--do all of the previous file? - if pdf_filename and not did_page_range: - pairs.append( (pdf_filename, PAGE_RANGE_ALL) ) - - pdf_filename = arg - did_page_range = False - return pairs diff --git a/vendor/PyPDF2/pdf.py b/vendor/PyPDF2/pdf.py deleted file mode 100755 index 9979414f..00000000 --- a/vendor/PyPDF2/pdf.py +++ /dev/null @@ -1,3004 +0,0 @@ -# -*- coding: utf-8 -*- -# -# vim: sw=4:expandtab:foldmethod=marker -# -# Copyright (c) 2006, Mathieu Fenniak -# Copyright (c) 2007, Ashish Kulkarni -# -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright notice, -# this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# * The name of the author may not be used to endorse or promote products -# derived from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. - -""" -A pure-Python PDF library with an increasing number of capabilities. -See README for links to FAQ, documentation, homepage, etc. -""" - -__author__ = "Mathieu Fenniak" -__author_email__ = "biziqe@mathieu.fenniak.net" - -__maintainer__ = "Phaseit, Inc." -__maintainer_email = "PyPDF2@phaseit.net" - -import string -import math -import struct -import sys -import uuid -from sys import version_info -if version_info < ( 3, 0 ): - from cStringIO import StringIO -else: - from io import StringIO - -if version_info < ( 3, 0 ): - BytesIO = StringIO -else: - from io import BytesIO - -from . import filters -from . 
import utils -import warnings -import codecs -from .generic import * -from .utils import readNonWhitespace, readUntilWhitespace, ConvertFunctionsToVirtualList -from .utils import isString, b_, u_, ord_, chr_, str_, formatWarning - -if version_info < ( 2, 4 ): - from sets import ImmutableSet as frozenset - -if version_info < ( 2, 5 ): - from md5 import md5 -else: - from hashlib import md5 -import uuid - - -class PdfFileWriter(object): - """ - This class supports writing PDF files out, given pages produced by another - class (typically :class:`PdfFileReader`). - """ - def __init__(self): - self._header = b_("%PDF-1.3") - self._objects = [] # array of indirect objects - - # The root of our page tree node. - pages = DictionaryObject() - pages.update({ - NameObject("/Type"): NameObject("/Pages"), - NameObject("/Count"): NumberObject(0), - NameObject("/Kids"): ArrayObject(), - }) - self._pages = self._addObject(pages) - - # info object - info = DictionaryObject() - info.update({ - NameObject("/Producer"): createStringObject(codecs.BOM_UTF16_BE + u_("PyPDF2").encode('utf-16be')) - }) - self._info = self._addObject(info) - - # root object - root = DictionaryObject() - root.update({ - NameObject("/Type"): NameObject("/Catalog"), - NameObject("/Pages"): self._pages, - }) - self._root = None - self._root_object = root - - def _addObject(self, obj): - self._objects.append(obj) - return IndirectObject(len(self._objects), 0, self) - - def getObject(self, ido): - if ido.pdf != self: - raise ValueError("pdf must be self") - return self._objects[ido.idnum - 1] - - def _addPage(self, page, action): - assert page["/Type"] == "/Page" - page[NameObject("/Parent")] = self._pages - page = self._addObject(page) - pages = self.getObject(self._pages) - action(pages["/Kids"], page) - pages[NameObject("/Count")] = NumberObject(pages["/Count"] + 1) - - def addPage(self, page): - """ - Adds a page to this PDF file. The page is usually acquired from a - :class:`PdfFileReader` instance. - - :param PageObject page: The page to add to the document. Should be - an instance of :class:`PageObject` - """ - self._addPage(page, list.append) - - def insertPage(self, page, index=0): - """ - Insert a page in this PDF file. The page is usually acquired from a - :class:`PdfFileReader` instance. - - :param PageObject page: The page to add to the document. This - argument should be an instance of :class:`PageObject`. - :param int index: Position at which the page will be inserted. - """ - self._addPage(page, lambda l, p: l.insert(index, p)) - - def getPage(self, pageNumber): - """ - Retrieves a page by number from this PDF file. - - :param int pageNumber: The page number to retrieve - (pages begin at zero) - :return: the page at the index given by *pageNumber* - :rtype: :class:`PageObject` - """ - pages = self.getObject(self._pages) - # XXX: crude hack - return pages["/Kids"][pageNumber].getObject() - - def getNumPages(self): - """ - :return: the number of pages. - :rtype: int - """ - pages = self.getObject(self._pages) - return int(pages[NameObject("/Count")]) - - def addBlankPage(self, width=None, height=None): - """ - Appends a blank page to this PDF file and returns it. If no page size - is specified, use the size of the last page. - - :param float width: The width of the new page expressed in default user - space units. - :param float height: The height of the new page expressed in default - user space units. 
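A hedged sketch of the page-level PdfFileWriter methods documented above (addPage, insertPage, addBlankPage); "input.pdf" and "output.pdf" are placeholder names, and write() is documented further below.

from PyPDF2 import PdfFileReader, PdfFileWriter

reader = PdfFileReader(open("input.pdf", "rb"))
writer = PdfFileWriter()
writer.addPage(reader.getPage(0))               # copy the first page from the reader
writer.insertPage(reader.getPage(1), index=0)   # put page 1 in front of it
writer.addBlankPage()                           # blank page sized like the last page
with open("output.pdf", "wb") as out:
    writer.write(out)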
- :return: the newly appended page - :rtype: :class:`PageObject` - :raises PageSizeNotDefinedError: if width and height are not defined - and previous page does not exist. - """ - page = PageObject.createBlankPage(self, width, height) - self.addPage(page) - return page - - def insertBlankPage(self, width=None, height=None, index=0): - """ - Inserts a blank page to this PDF file and returns it. If no page size - is specified, use the size of the last page. - - :param float width: The width of the new page expressed in default user - space units. - :param float height: The height of the new page expressed in default - user space units. - :param int index: Position to add the page. - :return: the newly appended page - :rtype: :class:`PageObject` - :raises PageSizeNotDefinedError: if width and height are not defined - and previous page does not exist. - """ - if width is None or height is None and \ - (self.getNumPages() - 1) >= index: - oldpage = self.getPage(index) - width = oldpage.mediaBox.getWidth() - height = oldpage.mediaBox.getHeight() - page = PageObject.createBlankPage(self, width, height) - self.insertPage(page, index) - return page - - def addJS(self, javascript): - """ - Add Javascript which will launch upon opening this PDF. - - :param str javascript: Your Javascript. - - >>> output.addJS("this.print({bUI:true,bSilent:false,bShrinkToFit:true});") - # Example: This will launch the print window when the PDF is opened. - """ - js = DictionaryObject() - js.update({ - NameObject("/Type"): NameObject("/Action"), - NameObject("/S"): NameObject("/JavaScript"), - NameObject("/JS"): NameObject("(%s)" % javascript) - }) - js_indirect_object = self._addObject(js) - - # We need a name for parameterized javascript in the pdf file, but it can be anything. - js_string_name = str(uuid.uuid4()) - - js_name_tree = DictionaryObject() - js_name_tree.update({ - NameObject("/JavaScript"): DictionaryObject({ - NameObject("/Names"): ArrayObject([createStringObject(js_string_name), js_indirect_object]) - }) - }) - self._addObject(js_name_tree) - - self._root_object.update({ - NameObject("/OpenAction"): js_indirect_object, - NameObject("/Names"): js_name_tree - }) - - def addAttachment(self, fname, fdata): - """ - Embed a file inside the PDF. - - :param str fname: The filename to display. - :param str fdata: The data in the file. - - Reference: - https://www.adobe.com/content/dam/Adobe/en/devnet/acrobat/pdfs/PDF32000_2008.pdf - Section 7.11.3 - """ - - # We need 3 entries: - # * The file's data - # * The /Filespec entry - # * The file's name, which goes in the Catalog - - - # The entry for the file - """ Sample: - 8 0 obj - << - /Length 12 - /Type /EmbeddedFile - >> - stream - Hello world! 
- endstream - endobj - """ - file_entry = DecodedStreamObject() - file_entry.setData(fdata) - file_entry.update({ - NameObject("/Type"): NameObject("/EmbeddedFile") - }) - - # The Filespec entry - """ Sample: - 7 0 obj - << - /Type /Filespec - /F (hello.txt) - /EF << /F 8 0 R >> - >> - """ - efEntry = DictionaryObject() - efEntry.update({ NameObject("/F"):file_entry }) - - filespec = DictionaryObject() - filespec.update({ - NameObject("/Type"): NameObject("/Filespec"), - NameObject("/F"): createStringObject(fname), # Perhaps also try TextStringObject - NameObject("/EF"): efEntry - }) - - # Then create the entry for the root, as it needs a reference to the Filespec - """ Sample: - 1 0 obj - << - /Type /Catalog - /Outlines 2 0 R - /Pages 3 0 R - /Names << /EmbeddedFiles << /Names [(hello.txt) 7 0 R] >> >> - >> - endobj - - """ - embeddedFilesNamesDictionary = DictionaryObject() - embeddedFilesNamesDictionary.update({ - NameObject("/Names"): ArrayObject([createStringObject(fname), filespec]) - }) - - embeddedFilesDictionary = DictionaryObject() - embeddedFilesDictionary.update({ - NameObject("/EmbeddedFiles"): embeddedFilesNamesDictionary - }) - # Update the root - self._root_object.update({ - NameObject("/Names"): embeddedFilesDictionary - }) - - def appendPagesFromReader(self, reader, after_page_append=None): - """ - Copy pages from reader to writer. Includes an optional callback parameter - which is invoked after pages are appended to the writer. - - :param reader: a PdfFileReader object from which to copy page - annotations to this writer object. The writer's annots - will then be updated - :callback after_page_append (function): Callback function that is invoked after - each page is appended to the writer. Callback signature: - - :param writer_pageref (PDF page reference): Reference to the page - appended to the writer. - """ - # Get page count from writer and reader - reader_num_pages = reader.getNumPages() - writer_num_pages = self.getNumPages() - - # Copy pages from reader to writer - for rpagenum in range(0, reader_num_pages): - reader_page = reader.getPage(rpagenum) - self.addPage(reader_page) - writer_page = self.getPage(writer_num_pages+rpagenum) - # Trigger callback, pass writer page as parameter - if callable(after_page_append): after_page_append(writer_page) - - def updatePageFormFieldValues(self, page, fields): - ''' - Update the form field values for a given page from a fields dictionary. - Copy field texts and values from fields to page. - - :param page: Page reference from PDF writer where the annotations - and field data will be updated. - :param fields: a Python dictionary of field names (/T) and text - values (/V) - ''' - # Iterate through pages, update field values - for j in range(0, len(page['/Annots'])): - writer_annot = page['/Annots'][j].getObject() - for field in fields: - if writer_annot.get('/T') == field: - writer_annot.update({ - NameObject("/V"): TextStringObject(fields[field]) - }) - - def cloneReaderDocumentRoot(self, reader): - ''' - Copy the reader document root to the writer. - - :param reader: PdfFileReader from the document root should be copied. - :callback after_page_append - ''' - self._root_object = reader.trailer['/Root'] - - def cloneDocumentFromReader(self, reader, after_page_append=None): - ''' - Create a copy (clone) of a document from a PDF file reader - - :param reader: PDF file reader instance from which the clone - should be created. 
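One possible use of the addJS / addAttachment / appendPagesFromReader helpers documented above; file names and content are placeholders.

from PyPDF2 import PdfFileReader, PdfFileWriter

writer = PdfFileWriter()
writer.appendPagesFromReader(PdfFileReader(open("input.pdf", "rb")))
writer.addJS("this.print({bUI:true,bSilent:false,bShrinkToFit:true});")  # runs when the PDF is opened
writer.addAttachment("hello.txt", b"Hello world!")                       # embedded file (bytes for safety)
with open("with_extras.pdf", "wb") as out:
    writer.write(out)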
- :callback after_page_append (function): Callback function that is invoked after - each page is appended to the writer. Signature includes a reference to the - appended page (delegates to appendPagesFromReader). Callback signature: - - :param writer_pageref (PDF page reference): Reference to the page just - appended to the document. - ''' - self.cloneReaderDocumentRoot(reader) - self.appendPagesFromReader(reader, after_page_append) - - def encrypt(self, user_pwd, owner_pwd = None, use_128bit = True): - """ - Encrypt this PDF file with the PDF Standard encryption handler. - - :param str user_pwd: The "user password", which allows for opening - and reading the PDF file with the restrictions provided. - :param str owner_pwd: The "owner password", which allows for - opening the PDF files without any restrictions. By default, - the owner password is the same as the user password. - :param bool use_128bit: flag as to whether to use 128bit - encryption. When false, 40bit encryption will be used. By default, - this flag is on. - """ - import time, random - if owner_pwd == None: - owner_pwd = user_pwd - if use_128bit: - V = 2 - rev = 3 - keylen = int(128 / 8) - else: - V = 1 - rev = 2 - keylen = int(40 / 8) - # permit everything: - P = -1 - O = ByteStringObject(_alg33(owner_pwd, user_pwd, rev, keylen)) - ID_1 = ByteStringObject(md5(b_(repr(time.time()))).digest()) - ID_2 = ByteStringObject(md5(b_(repr(random.random()))).digest()) - self._ID = ArrayObject((ID_1, ID_2)) - if rev == 2: - U, key = _alg34(user_pwd, O, P, ID_1) - else: - assert rev == 3 - U, key = _alg35(user_pwd, rev, keylen, O, P, ID_1, False) - encrypt = DictionaryObject() - encrypt[NameObject("/Filter")] = NameObject("/Standard") - encrypt[NameObject("/V")] = NumberObject(V) - if V == 2: - encrypt[NameObject("/Length")] = NumberObject(keylen * 8) - encrypt[NameObject("/R")] = NumberObject(rev) - encrypt[NameObject("/O")] = ByteStringObject(O) - encrypt[NameObject("/U")] = ByteStringObject(U) - encrypt[NameObject("/P")] = NumberObject(P) - self._encrypt = self._addObject(encrypt) - self._encrypt_key = key - - def write(self, stream): - """ - Writes the collection of pages added to this object out as a PDF file. - - :param stream: An object to write the file to. The object must support - the write method and the tell method, similar to a file object. - """ - if hasattr(stream, 'mode') and 'b' not in stream.mode: - warnings.warn("File <%s> to write to is not in binary mode. It may not be written to correctly." % stream.name) - debug = False - import struct - - if not self._root: - self._root = self._addObject(self._root_object) - - externalReferenceMap = {} - - # PDF objects sometimes have circular references to their /Page objects - # inside their object tree (for example, annotations). Those will be - # indirect references to objects that we've recreated in this PDF. To - # address this problem, PageObject's store their original object - # reference number, and we add it to the external reference map before - # we sweep for indirect references. This forces self-page-referencing - # trees to reference the correct new object location, rather than - # copying in a new copy of the page object. 
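A minimal sketch of the Standard-security encryption handler described above; the passwords and output name are placeholders.

from PyPDF2 import PdfFileWriter

writer = PdfFileWriter()
writer.addBlankPage(width=612, height=792)       # one US Letter sized page
writer.encrypt(user_pwd="reader-pass", owner_pwd="owner-pass", use_128bit=True)
with open("encrypted.pdf", "wb") as out:
    writer.write(out)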
- for objIndex in range(len(self._objects)): - obj = self._objects[objIndex] - if isinstance(obj, PageObject) and obj.indirectRef != None: - data = obj.indirectRef - if data.pdf not in externalReferenceMap: - externalReferenceMap[data.pdf] = {} - if data.generation not in externalReferenceMap[data.pdf]: - externalReferenceMap[data.pdf][data.generation] = {} - externalReferenceMap[data.pdf][data.generation][data.idnum] = IndirectObject(objIndex + 1, 0, self) - - self.stack = [] - if debug: print(("ERM:", externalReferenceMap, "root:", self._root)) - self._sweepIndirectReferences(externalReferenceMap, self._root) - del self.stack - - # Begin writing: - object_positions = [] - stream.write(self._header + b_("\n")) - for i in range(len(self._objects)): - idnum = (i + 1) - obj = self._objects[i] - object_positions.append(stream.tell()) - stream.write(b_(str(idnum) + " 0 obj\n")) - key = None - if hasattr(self, "_encrypt") and idnum != self._encrypt.idnum: - pack1 = struct.pack("` for details. - """ - pageRef = self.getObject(self._pages)['/Kids'][pagenum] - action = DictionaryObject() - zoomArgs = [] - for a in args: - if a is not None: - zoomArgs.append(NumberObject(a)) - else: - zoomArgs.append(NullObject()) - dest = Destination(NameObject("/"+title + " bookmark"), pageRef, NameObject(fit), *zoomArgs) - destArray = dest.getDestArray() - action.update({ - NameObject('/D') : destArray, - NameObject('/S') : NameObject('/GoTo') - }) - actionRef = self._addObject(action) - - outlineRef = self.getOutlineRoot() - - if parent == None: - parent = outlineRef - - bookmark = TreeObject() - - bookmark.update({ - NameObject('/A'): actionRef, - NameObject('/Title'): createStringObject(title), - }) - - if color is not None: - bookmark.update({NameObject('/C'): ArrayObject([FloatObject(c) for c in color])}) - - format = 0 - if italic: - format += 1 - if bold: - format += 2 - if format: - bookmark.update({NameObject('/F'): NumberObject(format)}) - - bookmarkRef = self._addObject(bookmark) - - parent = parent.getObject() - parent.addChild(bookmarkRef, self) - - return bookmarkRef - - def addNamedDestinationObject(self, dest): - destRef = self._addObject(dest) - - nd = self.getNamedDestRoot() - nd.extend([dest['/Title'], destRef]) - - return destRef - - def addNamedDestination(self, title, pagenum): - pageRef = self.getObject(self._pages)['/Kids'][pagenum] - dest = DictionaryObject() - dest.update({ - NameObject('/D') : ArrayObject([pageRef, NameObject('/FitH'), NumberObject(826)]), - NameObject('/S') : NameObject('/GoTo') - }) - - destRef = self._addObject(dest) - nd = self.getNamedDestRoot() - - nd.extend([title, destRef]) - - return destRef - - def removeLinks(self): - """ - Removes links and annotations from this output. - """ - pages = self.getObject(self._pages)['/Kids'] - for page in pages: - pageRef = self.getObject(page) - if "/Annots" in pageRef: - del pageRef['/Annots'] - - def removeImages(self, ignoreByteStringObject=False): - """ - Removes images from this output. - - :param bool ignoreByteStringObject: optional parameter - to ignore ByteString Objects. 
- """ - pages = self.getObject(self._pages)['/Kids'] - for j in range(len(pages)): - page = pages[j] - pageRef = self.getObject(page) - content = pageRef['/Contents'].getObject() - if not isinstance(content, ContentStream): - content = ContentStream(content, pageRef) - - _operations = [] - seq_graphics = False - for operands, operator in content.operations: - if operator == b_('Tj'): - text = operands[0] - if ignoreByteStringObject: - if not isinstance(text, TextStringObject): - operands[0] = TextStringObject() - elif operator == b_("'"): - text = operands[0] - if ignoreByteStringObject: - if not isinstance(text, TextStringObject): - operands[0] = TextStringObject() - elif operator == b_('"'): - text = operands[2] - if ignoreByteStringObject: - if not isinstance(text, TextStringObject): - operands[2] = TextStringObject() - elif operator == b_("TJ"): - for i in range(len(operands[0])): - if ignoreByteStringObject: - if not isinstance(operands[0][i], TextStringObject): - operands[0][i] = TextStringObject() - - if operator == b_('q'): - seq_graphics = True - if operator == b_('Q'): - seq_graphics = False - if seq_graphics: - if operator in [b_('cm'), b_('w'), b_('J'), b_('j'), b_('M'), b_('d'), b_('ri'), b_('i'), - b_('gs'), b_('W'), b_('b'), b_('s'), b_('S'), b_('f'), b_('F'), b_('n'), b_('m'), b_('l'), - b_('c'), b_('v'), b_('y'), b_('h'), b_('B'), b_('Do'), b_('sh')]: - continue - if operator == b_('re'): - continue - _operations.append((operands, operator)) - - content.operations = _operations - pageRef.__setitem__(NameObject('/Contents'), content) - - def removeText(self, ignoreByteStringObject=False): - """ - Removes images from this output. - - :param bool ignoreByteStringObject: optional parameter - to ignore ByteString Objects. - """ - pages = self.getObject(self._pages)['/Kids'] - for j in range(len(pages)): - page = pages[j] - pageRef = self.getObject(page) - content = pageRef['/Contents'].getObject() - if not isinstance(content, ContentStream): - content = ContentStream(content, pageRef) - for operands,operator in content.operations: - if operator == b_('Tj'): - text = operands[0] - if not ignoreByteStringObject: - if isinstance(text, TextStringObject): - operands[0] = TextStringObject() - else: - if isinstance(text, TextStringObject) or \ - isinstance(text, ByteStringObject): - operands[0] = TextStringObject() - elif operator == b_("'"): - text = operands[0] - if not ignoreByteStringObject: - if isinstance(text, TextStringObject): - operands[0] = TextStringObject() - else: - if isinstance(text, TextStringObject) or \ - isinstance(text, ByteStringObject): - operands[0] = TextStringObject() - elif operator == b_('"'): - text = operands[2] - if not ignoreByteStringObject: - if isinstance(text, TextStringObject): - operands[2] = TextStringObject() - else: - if isinstance(text, TextStringObject) or \ - isinstance(text, ByteStringObject): - operands[2] = TextStringObject() - elif operator == b_("TJ"): - for i in range(len(operands[0])): - if not ignoreByteStringObject: - if isinstance(operands[0][i], TextStringObject): - operands[0][i] = TextStringObject() - else: - if isinstance(operands[0][i], TextStringObject) or \ - isinstance(operands[0][i], ByteStringObject): - operands[0][i] = TextStringObject() - - pageRef.__setitem__(NameObject('/Contents'), content) - - def addLink(self, pagenum, pagedest, rect, border=None, fit='/Fit', *args): - """ - Add an internal link from a rectangular area to the specified page. - - :param int pagenum: index of the page on which to place the link. 
- :param int pagedest: index of the page to which the link should go. - :param rect: :class:`RectangleObject` or array of four - integers specifying the clickable rectangular area - ``[xLL, yLL, xUR, yUR]``, or string in the form ``"[ xLL yLL xUR yUR ]"``. - :param border: if provided, an array describing border-drawing - properties. See the PDF spec for details. No border will be - drawn if this argument is omitted. - :param str fit: Page fit or 'zoom' option (see below). Additional arguments may need - to be supplied. Passing ``None`` will be read as a null value for that coordinate. - - Valid zoom arguments (see Table 8.2 of the PDF 1.7 reference for details): - /Fit No additional arguments - /XYZ [left] [top] [zoomFactor] - /FitH [top] - /FitV [left] - /FitR [left] [bottom] [right] [top] - /FitB No additional arguments - /FitBH [top] - /FitBV [left] - """ - - pageLink = self.getObject(self._pages)['/Kids'][pagenum] - pageDest = self.getObject(self._pages)['/Kids'][pagedest] #TODO: switch for external link - pageRef = self.getObject(pageLink) - - if border is not None: - borderArr = [NameObject(n) for n in border[:3]] - if len(border) == 4: - dashPattern = ArrayObject([NameObject(n) for n in border[3]]) - borderArr.append(dashPattern) - else: - borderArr = [NumberObject(0)] * 3 - - if isString(rect): - rect = NameObject(rect) - elif isinstance(rect, RectangleObject): - pass - else: - rect = RectangleObject(rect) - - zoomArgs = [] - for a in args: - if a is not None: - zoomArgs.append(NumberObject(a)) - else: - zoomArgs.append(NullObject()) - dest = Destination(NameObject("/LinkName"), pageDest, NameObject(fit), *zoomArgs) #TODO: create a better name for the link - destArray = dest.getDestArray() - - lnk = DictionaryObject() - lnk.update({ - NameObject('/Type'): NameObject('/Annot'), - NameObject('/Subtype'): NameObject('/Link'), - NameObject('/P'): pageLink, - NameObject('/Rect'): rect, - NameObject('/Border'): ArrayObject(borderArr), - NameObject('/Dest'): destArray - }) - lnkRef = self._addObject(lnk) - - if "/Annots" in pageRef: - pageRef['/Annots'].append(lnkRef) - else: - pageRef[NameObject('/Annots')] = ArrayObject([lnkRef]) - - _valid_layouts = ['/NoLayout', '/SinglePage', '/OneColumn', '/TwoColumnLeft', '/TwoColumnRight', '/TwoPageLeft', '/TwoPageRight'] - - def getPageLayout(self): - """ - Get the page layout. - See :meth:`setPageLayout()` for a description of valid layouts. - - :return: Page layout currently being used. 
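A sketch of the bookmark, named-destination and link helpers documented above; file names are placeholders and keyword names follow the signatures shown here.

from PyPDF2 import PdfFileReader, PdfFileWriter

reader = PdfFileReader(open("input.pdf", "rb"))
writer = PdfFileWriter()
for i in range(reader.getNumPages()):
    writer.addPage(reader.getPage(i))

chapter = writer.addBookmark("Chapter 1", 0, bold=True)   # outline entry pointing at page 0
writer.addBookmark("Section 1.1", 1, parent=chapter)      # nested under "Chapter 1"
writer.addNamedDestination("start-here", 0)               # named destination for page 0
writer.addLink(0, 1, [72, 72, 216, 108])                  # clickable box on page 0 -> page 1
with open("linked.pdf", "wb") as out:
    writer.write(out)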
- :rtype: str, None if not specified - """ - try: - return self._root_object['/PageLayout'] - except KeyError: - return None - - def setPageLayout(self, layout): - """ - Set the page layout - - :param str layout: The page layout to be used - - Valid layouts are: - /NoLayout Layout explicitly not specified - /SinglePage Show one page at a time - /OneColumn Show one column at a time - /TwoColumnLeft Show pages in two columns, odd-numbered pages on the left - /TwoColumnRight Show pages in two columns, odd-numbered pages on the right - /TwoPageLeft Show two pages at a time, odd-numbered pages on the left - /TwoPageRight Show two pages at a time, odd-numbered pages on the right - """ - if not isinstance(layout, NameObject): - if layout not in self._valid_layouts: - warnings.warn("Layout should be one of: {}".format(', '.join(self._valid_layouts))) - layout = NameObject(layout) - self._root_object.update({NameObject('/PageLayout'): layout}) - - pageLayout = property(getPageLayout, setPageLayout) - """Read and write property accessing the :meth:`getPageLayout()` - and :meth:`setPageLayout()` methods.""" - - _valid_modes = ['/UseNone', '/UseOutlines', '/UseThumbs', '/FullScreen', '/UseOC', '/UseAttachments'] - - def getPageMode(self): - """ - Get the page mode. - See :meth:`setPageMode()` for a description - of valid modes. - - :return: Page mode currently being used. - :rtype: str, None if not specified - """ - try: - return self._root_object['/PageMode'] - except KeyError: - return None - - def setPageMode(self, mode): - """ - Set the page mode. - - :param str mode: The page mode to use. - - Valid modes are: - /UseNone Do not show outlines or thumbnails panels - /UseOutlines Show outlines (aka bookmarks) panel - /UseThumbs Show page thumbnails panel - /FullScreen Fullscreen view - /UseOC Show Optional Content Group (OCG) panel - /UseAttachments Show attachments panel - """ - if not isinstance(mode, NameObject): - if mode not in self._valid_modes: - warnings.warn("Mode should be one of: {}".format(', '.join(self._valid_modes))) - mode = NameObject(mode) - self._root_object.update({NameObject('/PageMode'): mode}) - - pageMode = property(getPageMode, setPageMode) - """Read and write property accessing the :meth:`getPageMode()` - and :meth:`setPageMode()` methods.""" - - -class PdfFileReader(object): - """ - Initializes a PdfFileReader object. This operation can take some time, as - the PDF stream's cross-reference tables are read into memory. - - :param stream: A File object or an object that supports the standard read - and seek methods similar to a File object. Could also be a - string representing a path to a PDF file. - :param bool strict: Determines whether user should be warned of all - problems and also causes some correctable problems to be fatal. - Defaults to ``True``. - :param warndest: Destination for logging warnings (defaults to - ``sys.stderr``). - :param bool overwriteWarnings: Determines whether to override Python's - ``warnings.py`` module with a custom implementation (defaults to - ``True``). 
- """ - def __init__(self, stream, strict=True, warndest = None, overwriteWarnings = True): - if overwriteWarnings: - # have to dynamically override the default showwarning since there are no - # public methods that specify the 'file' parameter - def _showwarning(message, category, filename, lineno, file=warndest, line=None): - if file is None: - file = sys.stderr - try: - file.write(formatWarning(message, category, filename, lineno, line)) - except IOError: - pass - warnings.showwarning = _showwarning - self.strict = strict - self.flattenedPages = None - self.resolvedObjects = {} - self.xrefIndex = 0 - self._pageId2Num = None # map page IndirectRef number to Page Number - if hasattr(stream, 'mode') and 'b' not in stream.mode: - warnings.warn("PdfFileReader stream/file object is not in binary mode. It may not be read correctly.", utils.PdfReadWarning) - if isString(stream): - fileobj = open(stream, 'rb') - stream = BytesIO(b_(fileobj.read())) - fileobj.close() - self.read(stream) - self.stream = stream - - self._override_encryption = False - - def getDocumentInfo(self): - """ - Retrieves the PDF file's document information dictionary, if it exists. - Note that some PDF files use metadata streams instead of docinfo - dictionaries, and these metadata streams will not be accessed by this - function. - - :return: the document information of this PDF file - :rtype: :class:`DocumentInformation` or ``None`` if none exists. - """ - if "/Info" not in self.trailer: - return None - obj = self.trailer['/Info'] - retval = DocumentInformation() - retval.update(obj) - return retval - - documentInfo = property(lambda self: self.getDocumentInfo(), None, None) - """Read-only property that accesses the :meth:`getDocumentInfo()` function.""" - - def getXmpMetadata(self): - """ - Retrieves XMP (Extensible Metadata Platform) data from the PDF document - root. - - :return: a :class:`XmpInformation` - instance that can be used to access XMP metadata from the document. - :rtype: :class:`XmpInformation` or - ``None`` if no metadata was found on the document root. - """ - try: - self._override_encryption = True - return self.trailer["/Root"].getXmpMetadata() - finally: - self._override_encryption = False - - xmpMetadata = property(lambda self: self.getXmpMetadata(), None, None) - """ - Read-only property that accesses the - :meth:`getXmpMetadata()` function. - """ - - def getNumPages(self): - """ - Calculates the number of pages in this PDF file. - - :return: number of pages - :rtype: int - :raises PdfReadError: if file is encrypted and restrictions prevent - this action. - """ - - # Flattened pages will not work on an Encrypted PDF; - # the PDF file's page count is used in this case. Otherwise, - # the original method (flattened page count) is used. - if self.isEncrypted: - try: - self._override_encryption = True - self.decrypt('') - return self.trailer["/Root"]["/Pages"]["/Count"] - except: - raise utils.PdfReadError("File has not been decrypted") - finally: - self._override_encryption = False - else: - if self.flattenedPages == None: - self._flatten() - return len(self.flattenedPages) - - numPages = property(lambda self: self.getNumPages(), None, None) - """ - Read-only property that accesses the - :meth:`getNumPages()` function. - """ - - def getPage(self, pageNumber): - """ - Retrieves a page by number from this PDF file. - - :param int pageNumber: The page number to retrieve - (pages begin at zero) - :return: a :class:`PageObject` instance. 
- :rtype: :class:`PageObject` - """ - ## ensure that we're not trying to access an encrypted PDF - #assert not self.trailer.has_key("/Encrypt") - if self.flattenedPages == None: - self._flatten() - return self.flattenedPages[pageNumber] - - namedDestinations = property(lambda self: - self.getNamedDestinations(), None, None) - """ - Read-only property that accesses the - :meth:`getNamedDestinations()` function. - """ - - # A select group of relevant field attributes. For the complete list, - # see section 8.6.2 of the PDF 1.7 reference. - - def getFields(self, tree = None, retval = None, fileobj = None): - """ - Extracts field data if this PDF contains interactive form fields. - The *tree* and *retval* parameters are for recursive use. - - :param fileobj: A file object (usually a text file) to write - a report to on all interactive form fields found. - :return: A dictionary where each key is a field name, and each - value is a :class:`Field` object. By - default, the mapping name is used for keys. - :rtype: dict, or ``None`` if form data could not be located. - """ - fieldAttributes = {"/FT" : "Field Type", "/Parent" : "Parent", - "/T" : "Field Name", "/TU" : "Alternate Field Name", - "/TM" : "Mapping Name", "/Ff" : "Field Flags", - "/V" : "Value", "/DV" : "Default Value"} - if retval == None: - retval = {} - catalog = self.trailer["/Root"] - # get the AcroForm tree - if "/AcroForm" in catalog: - tree = catalog["/AcroForm"] - else: - return None - if tree == None: - return retval - - self._checkKids(tree, retval, fileobj) - for attr in fieldAttributes: - if attr in tree: - # Tree is a field - self._buildField(tree, retval, fileobj, fieldAttributes) - break - - if "/Fields" in tree: - fields = tree["/Fields"] - for f in fields: - field = f.getObject() - self._buildField(field, retval, fileobj, fieldAttributes) - - return retval - - def _buildField(self, field, retval, fileobj, fieldAttributes): - self._checkKids(field, retval, fileobj) - try: - key = field["/TM"] - except KeyError: - try: - key = field["/T"] - except KeyError: - # Ignore no-name field for now - return - if fileobj: - self._writeField(fileobj, field, fieldAttributes) - fileobj.write("\n") - retval[key] = Field(field) - - def _checkKids(self, tree, retval, fileobj): - if "/Kids" in tree: - # recurse down the tree - for kid in tree["/Kids"]: - self.getFields(kid.getObject(), retval, fileobj) - - def _writeField(self, fileobj, field, fieldAttributes): - order = ["/TM", "/T", "/FT", "/Parent", "/TU", "/Ff", "/V", "/DV"] - for attr in order: - attrName = fieldAttributes[attr] - try: - if attr == "/FT": - # Make the field type value more clear - types = {"/Btn":"Button", "/Tx":"Text", "/Ch": "Choice", - "/Sig":"Signature"} - if field[attr] in types: - fileobj.write(attrName + ": " + types[field[attr]] + "\n") - elif attr == "/Parent": - # Let's just write the name of the parent - try: - name = field["/Parent"]["/TM"] - except KeyError: - name = field["/Parent"]["/T"] - fileobj.write(attrName + ": " + name + "\n") - else: - fileobj.write(attrName + ": " + str(field[attr]) + "\n") - except KeyError: - # Field attribute is N/A or unknown, so don't write anything - pass - - def getFormTextFields(self): - ''' Retrieves form fields from the document with textual data (inputs, dropdowns) - ''' - # Retrieve document form fields - formfields = self.getFields() - return dict( - (formfields[field]['/T'], formfields[field].get('/V')) for field in formfields \ - if formfields[field].get('/FT') == '/Tx' - ) - - def getNamedDestinations(self, 
tree=None, retval=None): - """ - Retrieves the named destinations present in the document. - - :return: a dictionary which maps names to - :class:`Destinations`. - :rtype: dict - """ - if retval == None: - retval = {} - catalog = self.trailer["/Root"] - - # get the name tree - if "/Dests" in catalog: - tree = catalog["/Dests"] - elif "/Names" in catalog: - names = catalog['/Names'] - if "/Dests" in names: - tree = names['/Dests'] - - if tree == None: - return retval - - if "/Kids" in tree: - # recurse down the tree - for kid in tree["/Kids"]: - self.getNamedDestinations(kid.getObject(), retval) - - if "/Names" in tree: - names = tree["/Names"] - for i in range(0, len(names), 2): - key = names[i].getObject() - val = names[i+1].getObject() - if isinstance(val, DictionaryObject) and '/D' in val: - val = val['/D'] - dest = self._buildDestination(key, val) - if dest != None: - retval[key] = dest - - return retval - - outlines = property(lambda self: self.getOutlines(), None, None) - """ - Read-only property that accesses the - :meth:`getOutlines()` function. - """ - - def getOutlines(self, node=None, outlines=None): - """ - Retrieves the document outline present in the document. - - :return: a nested list of :class:`Destinations`. - """ - if outlines == None: - outlines = [] - catalog = self.trailer["/Root"] - - # get the outline dictionary and named destinations - if "/Outlines" in catalog: - try: - lines = catalog["/Outlines"] - except utils.PdfReadError: - # this occurs if the /Outlines object reference is incorrect - # for an example of such a file, see https://unglueit-files.s3.amazonaws.com/ebf/7552c42e9280b4476e59e77acc0bc812.pdf - # so continue to load the file without the Bookmarks - return outlines - - if "/First" in lines: - node = lines["/First"] - self._namedDests = self.getNamedDestinations() - - if node == None: - return outlines - - # see if there are any more outlines - while True: - outline = self._buildOutline(node) - if outline: - outlines.append(outline) - - # check for sub-outlines - if "/First" in node: - subOutlines = [] - self.getOutlines(node["/First"], subOutlines) - if subOutlines: - outlines.append(subOutlines) - - if "/Next" not in node: - break - node = node["/Next"] - - return outlines - - def _getPageNumberByIndirect(self, indirectRef): - """Generate _pageId2Num""" - if self._pageId2Num is None: - id2num = {} - for i, x in enumerate(self.pages): - id2num[x.indirectRef.idnum] = i - self._pageId2Num = id2num - - if isinstance(indirectRef, int): - idnum = indirectRef - else: - idnum = indirectRef.idnum - - ret = self._pageId2Num.get(idnum, -1) - return ret - - def getPageNumber(self, page): - """ - Retrieve page number of a given PageObject - - :param PageObject page: The page to get page number. Should be - an instance of :class:`PageObject` - :return: the page number or -1 if page not found - :rtype: int - """ - indirectRef = page.indirectRef - ret = self._getPageNumberByIndirect(indirectRef) - return ret - - def getDestinationPageNumber(self, destination): - """ - Retrieve page number of a given Destination object - - :param Destination destination: The destination to get page number. 
- Should be an instance of - :class:`Destination` - :return: the page number or -1 if page not found - :rtype: int - """ - indirectRef = destination.page - ret = self._getPageNumberByIndirect(indirectRef) - return ret - - def _buildDestination(self, title, array): - page, typ = array[0:2] - array = array[2:] - return Destination(title, page, typ, *array) - - def _buildOutline(self, node): - dest, title, outline = None, None, None - - if "/A" in node and "/Title" in node: - # Action, section 8.5 (only type GoTo supported) - title = node["/Title"] - action = node["/A"] - if action["/S"] == "/GoTo": - dest = action["/D"] - elif "/Dest" in node and "/Title" in node: - # Destination, section 8.2.1 - title = node["/Title"] - dest = node["/Dest"] - - # if destination found, then create outline - if dest: - if isinstance(dest, ArrayObject): - outline = self._buildDestination(title, dest) - elif isString(dest) and dest in self._namedDests: - outline = self._namedDests[dest] - outline[NameObject("/Title")] = title - else: - raise utils.PdfReadError("Unexpected destination %r" % dest) - return outline - - pages = property(lambda self: ConvertFunctionsToVirtualList(self.getNumPages, self.getPage), - None, None) - """ - Read-only property that emulates a list based upon the - :meth:`getNumPages()` and - :meth:`getPage()` methods. - """ - - def getPageLayout(self): - """ - Get the page layout. - See :meth:`setPageLayout()` - for a description of valid layouts. - - :return: Page layout currently being used. - :rtype: ``str``, ``None`` if not specified - """ - try: - return self.trailer['/Root']['/PageLayout'] - except KeyError: - return None - - pageLayout = property(getPageLayout) - """Read-only property accessing the - :meth:`getPageLayout()` method.""" - - def getPageMode(self): - """ - Get the page mode. - See :meth:`setPageMode()` - for a description of valid modes. - - :return: Page mode currently being used. 
- :rtype: ``str``, ``None`` if not specified - """ - try: - return self.trailer['/Root']['/PageMode'] - except KeyError: - return None - - pageMode = property(getPageMode) - """Read-only property accessing the - :meth:`getPageMode()` method.""" - - def _flatten(self, pages=None, inherit=None, indirectRef=None): - inheritablePageAttributes = ( - NameObject("/Resources"), NameObject("/MediaBox"), - NameObject("/CropBox"), NameObject("/Rotate") - ) - if inherit == None: - inherit = dict() - if pages == None: - self.flattenedPages = [] - catalog = self.trailer["/Root"].getObject() - pages = catalog["/Pages"].getObject() - - t = "/Pages" - if "/Type" in pages: - t = pages["/Type"] - - if t == "/Pages": - for attr in inheritablePageAttributes: - if attr in pages: - inherit[attr] = pages[attr] - for page in pages["/Kids"]: - addt = {} - if isinstance(page, IndirectObject): - addt["indirectRef"] = page - self._flatten(page.getObject(), inherit, **addt) - elif t == "/Page": - for attr, value in list(inherit.items()): - # if the page has it's own value, it does not inherit the - # parent's value: - if attr not in pages: - pages[attr] = value - pageObj = PageObject(self, indirectRef) - pageObj.update(pages) - self.flattenedPages.append(pageObj) - - def _getObjectFromStream(self, indirectReference): - # indirect reference to object in object stream - # read the entire object stream into memory - debug = False - stmnum, idx = self.xref_objStm[indirectReference.idnum] - if debug: print(("Here1: %s %s"%(stmnum, idx))) - objStm = IndirectObject(stmnum, 0, self).getObject() - if debug: print(("Here2: objStm=%s.. stmnum=%s data=%s"%(objStm, stmnum, objStm.getData()))) - # This is an xref to a stream, so its type better be a stream - assert objStm['/Type'] == '/ObjStm' - # /N is the number of indirect objects in the stream - assert idx < objStm['/N'] - streamData = BytesIO(b_(objStm.getData())) - for i in range(objStm['/N']): - readNonWhitespace(streamData) - streamData.seek(-1, 1) - objnum = NumberObject.readFromStream(streamData) - readNonWhitespace(streamData) - streamData.seek(-1, 1) - offset = NumberObject.readFromStream(streamData) - readNonWhitespace(streamData) - streamData.seek(-1, 1) - if objnum != indirectReference.idnum: - # We're only interested in one object - continue - if self.strict and idx != i: - raise utils.PdfReadError("Object is in wrong index.") - streamData.seek(objStm['/First']+offset, 0) - if debug: - pos = streamData.tell() - streamData.seek(0, 0) - lines = streamData.readlines() - for i in range(0, len(lines)): - print((lines[i])) - streamData.seek(pos, 0) - try: - obj = readObject(streamData, self) - except utils.PdfStreamError as e: - # Stream object cannot be read. Normally, a critical error, but - # Adobe Reader doesn't complain, so continue (in strict mode?) - e = sys.exc_info()[1] - warnings.warn("Invalid stream (index %d) within object %d %d: %s" % \ - (i, indirectReference.idnum, indirectReference.generation, e), utils.PdfReadWarning) - - if self.strict: - raise utils.PdfReadError("Can't read object stream: %s"%e) - # Replace with null. Hopefully it's nothing important. 
- obj = NullObject() - return obj - - if self.strict: raise utils.PdfReadError("This is a fatal error in strict mode.") - return NullObject() - - def getObject(self, indirectReference): - debug = False - if debug: print(("looking at:", indirectReference.idnum, indirectReference.generation)) - retval = self.cacheGetIndirectObject(indirectReference.generation, - indirectReference.idnum) - if retval != None: - return retval - if indirectReference.generation == 0 and \ - indirectReference.idnum in self.xref_objStm: - retval = self._getObjectFromStream(indirectReference) - elif indirectReference.generation in self.xref and \ - indirectReference.idnum in self.xref[indirectReference.generation]: - start = self.xref[indirectReference.generation][indirectReference.idnum] - if debug: print((" Uncompressed Object", indirectReference.idnum, indirectReference.generation, ":", start)) - self.stream.seek(start, 0) - idnum, generation = self.readObjectHeader(self.stream) - if idnum != indirectReference.idnum and self.xrefIndex: - # Xref table probably had bad indexes due to not being zero-indexed - if self.strict: - raise utils.PdfReadError("Expected object ID (%d %d) does not match actual (%d %d); xref table not zero-indexed." \ - % (indirectReference.idnum, indirectReference.generation, idnum, generation)) - else: pass # xref table is corrected in non-strict mode - elif idnum != indirectReference.idnum: - # some other problem - raise utils.PdfReadError("Expected object ID (%d %d) does not match actual (%d %d)." \ - % (indirectReference.idnum, indirectReference.generation, idnum, generation)) - assert generation == indirectReference.generation - retval = readObject(self.stream, self) - - # override encryption is used for the /Encrypt dictionary - if not self._override_encryption and self.isEncrypted: - # if we don't have the encryption key: - if not hasattr(self, '_decryption_key'): - raise utils.PdfReadError("file has not been decrypted") - # otherwise, decrypt here... 
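- # (Descriptive note: per the standard security handler, the per-object RC4 key
- # is derived by appending the low-order bytes of the object and generation
- # numbers to the file encryption key and hashing the result with MD5.)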
- import struct - pack1 = struct.pack(">read", stream) - # start at the end: - stream.seek(-1, 2) - if not stream.tell(): - raise utils.PdfReadError('Cannot read an empty file') - last1K = stream.tell() - 1024 + 1 # offset of last 1024 bytes of stream - line = b_('') - while line[:5] != b_("%%EOF"): - if stream.tell() < last1K: - raise utils.PdfReadError("EOF marker not found") - line = self.readNextEndLine(stream) - if debug: print(" line:",line) - - # find startxref entry - the location of the xref table - line = self.readNextEndLine(stream) - try: - startxref = int(line) - except ValueError: - # 'startxref' may be on the same line as the location - if not line.startswith(b_("startxref")): - raise utils.PdfReadError("startxref not found") - startxref = int(line[9:].strip()) - warnings.warn("startxref on same line as offset") - else: - line = self.readNextEndLine(stream) - if line[:9] != b_("startxref"): - raise utils.PdfReadError("startxref not found") - - # read all cross reference tables and their trailers - self.xref = {} - self.xref_objStm = {} - self.trailer = DictionaryObject() - while True: - # load the xref table - stream.seek(startxref, 0) - x = stream.read(1) - if x == b_("x"): - # standard cross-reference table - ref = stream.read(4) - if ref[:3] != b_("ref"): - raise utils.PdfReadError("xref table read error") - readNonWhitespace(stream) - stream.seek(-1, 1) - firsttime = True; # check if the first time looking at the xref table - while True: - num = readObject(stream, self) - if firsttime and num != 0: - self.xrefIndex = num - if self.strict: - warnings.warn("Xref table not zero-indexed. ID numbers for objects will be corrected.", utils.PdfReadWarning) - #if table not zero indexed, could be due to error from when PDF was created - #which will lead to mismatched indices later on, only warned and corrected if self.strict=True - firsttime = False - readNonWhitespace(stream) - stream.seek(-1, 1) - size = readObject(stream, self) - readNonWhitespace(stream) - stream.seek(-1, 1) - cnt = 0 - while cnt < size: - line = stream.read(20) - - # It's very clear in section 3.4.3 of the PDF spec - # that all cross-reference table lines are a fixed - # 20 bytes (as of PDF 1.7). However, some files have - # 21-byte entries (or more) due to the use of \r\n - # (CRLF) EOL's. Detect that case, and adjust the line - # until it does not begin with a \r (CR) or \n (LF). - while line[0] in b_("\x0D\x0A"): - stream.seek(-20 + 1, 1) - line = stream.read(20) - - # On the other hand, some malformed PDF files - # use a single character EOL without a preceeding - # space. Detect that case, and seek the stream - # back one character. (0-9 means we've bled into - # the next xref entry, t means we've bled into the - # text "trailer"): - if line[-1] in b_("0123456789t"): - stream.seek(-1, 1) - - offset, generation = line[:16].split(b_(" ")) - offset, generation = int(offset), int(generation) - if generation not in self.xref: - self.xref[generation] = {} - if num in self.xref[generation]: - # It really seems like we should allow the last - # xref table in the file to override previous - # ones. Since we read the file backwards, assume - # any existing key is already set correctly. - pass - else: - self.xref[generation][num] = offset - cnt += 1 - num += 1 - readNonWhitespace(stream) - stream.seek(-1, 1) - trailertag = stream.read(7) - if trailertag != b_("trailer"): - # more xrefs! 
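- # (rewind over the 7 bytes just read and keep scanning for further sections)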
- stream.seek(-7, 1) - else: - break - readNonWhitespace(stream) - stream.seek(-1, 1) - newTrailer = readObject(stream, self) - for key, value in list(newTrailer.items()): - if key not in self.trailer: - self.trailer[key] = value - if "/Prev" in newTrailer: - startxref = newTrailer["/Prev"] - else: - break - elif x.isdigit(): - # PDF 1.5+ Cross-Reference Stream - stream.seek(-1, 1) - idnum, generation = self.readObjectHeader(stream) - xrefstream = readObject(stream, self) - assert xrefstream["/Type"] == "/XRef" - self.cacheIndirectObject(generation, idnum, xrefstream) - streamData = BytesIO(b_(xrefstream.getData())) - # Index pairs specify the subsections in the dictionary. If - # none create one subsection that spans everything. - idx_pairs = xrefstream.get("/Index", [0, xrefstream.get("/Size")]) - if debug: print(("read idx_pairs=%s"%list(self._pairs(idx_pairs)))) - entrySizes = xrefstream.get("/W") - assert len(entrySizes) >= 3 - if self.strict and len(entrySizes) > 3: - raise utils.PdfReadError("Too many entry sizes: %s" %entrySizes) - - def getEntry(i): - # Reads the correct number of bytes for each entry. See the - # discussion of the W parameter in PDF spec table 17. - if entrySizes[i] > 0: - d = streamData.read(entrySizes[i]) - return convertToInt(d, entrySizes[i]) - - # PDF Spec Table 17: A value of zero for an element in the - # W array indicates...the default value shall be used - if i == 0: return 1 # First value defaults to 1 - else: return 0 - - def used_before(num, generation): - # We move backwards through the xrefs, don't replace any. - return num in self.xref.get(generation, []) or \ - num in self.xref_objStm - - # Iterate through each subsection - last_end = 0 - for start, size in self._pairs(idx_pairs): - # The subsections must increase - assert start >= last_end - last_end = start + size - for num in range(start, start+size): - # The first entry is the type - xref_type = getEntry(0) - # The rest of the elements depend on the xref_type - if xref_type == 0: - # linked list of free objects - next_free_object = getEntry(1) - next_generation = getEntry(2) - elif xref_type == 1: - # objects that are in use but are not compressed - byte_offset = getEntry(1) - generation = getEntry(2) - if generation not in self.xref: - self.xref[generation] = {} - if not used_before(num, generation): - self.xref[generation][num] = byte_offset - if debug: print(("XREF Uncompressed: %s %s"%( - num, generation))) - elif xref_type == 2: - # compressed objects - objstr_num = getEntry(1) - obstr_idx = getEntry(2) - generation = 0 # PDF spec table 18, generation is 0 - if not used_before(num, generation): - if debug: print(("XREF Compressed: %s %s %s"%( - num, objstr_num, obstr_idx))) - self.xref_objStm[num] = (objstr_num, obstr_idx) - elif self.strict: - raise utils.PdfReadError("Unknown xref type: %s"% - xref_type) - - trailerKeys = "/Root", "/Encrypt", "/Info", "/ID" - for key in trailerKeys: - if key in xrefstream and key not in self.trailer: - self.trailer[NameObject(key)] = xrefstream.raw_get(key) - if "/Prev" in xrefstream: - startxref = xrefstream["/Prev"] - else: - break - else: - # bad xref character at startxref. Let's see if we can find - # the xref table nearby, as we've observed this error with an - # off-by-one before. - stream.seek(-11, 1) - tmp = stream.read(20) - xref_loc = tmp.find(b_("xref")) - if xref_loc != -1: - startxref -= (10 - xref_loc) - continue - # No explicit xref table, try finding a cross-reference stream. 
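- # (a cross-reference stream begins with an object header such as "NN G obj",
- # so probe the next few bytes after startxref for a leading digit)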
- stream.seek(startxref, 0) - found = False - for look in range(5): - if stream.read(1).isdigit(): - # This is not a standard PDF, consider adding a warning - startxref += look - found = True - break - if found: - continue - # no xref table found at specified location - raise utils.PdfReadError("Could not find xref table at specified location") - #if not zero-indexed, verify that the table is correct; change it if necessary - if self.xrefIndex and not self.strict: - loc = stream.tell() - for gen in self.xref: - if gen == 65535: continue - for id in self.xref[gen]: - stream.seek(self.xref[gen][id], 0) - try: - pid, pgen = self.readObjectHeader(stream) - except ValueError: - break - if pid == id - self.xrefIndex: - self._zeroXref(gen) - break - #if not, then either it's just plain wrong, or the non-zero-index is actually correct - stream.seek(loc, 0) #return to where it was - - def _zeroXref(self, generation): - self.xref[generation] = dict( (k-self.xrefIndex, v) for (k, v) in list(self.xref[generation].items()) ) - - def _pairs(self, array): - i = 0 - while True: - yield array[i], array[i+1] - i += 2 - if (i+1) >= len(array): - break - - def readNextEndLine(self, stream): - debug = False - if debug: print(">>readNextEndLine") - line = b_("") - while True: - # Prevent infinite loops in malformed PDFs - if stream.tell() == 0: - raise utils.PdfReadError("Could not read malformed PDF file") - x = stream.read(1) - if debug: print((" x:", x, "%x"%ord(x))) - if stream.tell() < 2: - raise utils.PdfReadError("EOL marker not found") - stream.seek(-2, 1) - if x == b_('\n') or x == b_('\r'): ## \n = LF; \r = CR - crlf = False - while x == b_('\n') or x == b_('\r'): - if debug: - if ord(x) == 0x0D: print(" x is CR 0D") - elif ord(x) == 0x0A: print(" x is LF 0A") - x = stream.read(1) - if x == b_('\n') or x == b_('\r'): # account for CR+LF - stream.seek(-1, 1) - crlf = True - if stream.tell() < 2: - raise utils.PdfReadError("EOL marker not found") - stream.seek(-2, 1) - stream.seek(2 if crlf else 1, 1) #if using CR+LF, go back 2 bytes, else 1 - break - else: - if debug: print(" x is neither") - line = x + line - if debug: print((" RNEL line:", line)) - if debug: print("leaving RNEL") - return line - - def decrypt(self, password): - """ - When using an encrypted / secured PDF file with the PDF Standard - encryption handler, this function will allow the file to be decrypted. - It checks the given password against the document's user password and - owner password, and then stores the resulting decryption key if either - password is correct. - - It does not matter which password was matched. Both passwords provide - the correct decryption key that will allow the document to be used with - this library. - - :param str password: The password to match. - :return: ``0`` if the password failed, ``1`` if the password matched the user - password, and ``2`` if the password matched the owner password. - :rtype: int - :raises NotImplementedError: if document uses an unsupported encryption - method. 
- """ - - self._override_encryption = True - try: - return self._decrypt(password) - finally: - self._override_encryption = False - - def _decrypt(self, password): - encrypt = self.trailer['/Encrypt'].getObject() - if encrypt['/Filter'] != '/Standard': - raise NotImplementedError("only Standard PDF encryption handler is available") - if not (encrypt['/V'] in (1, 2)): - raise NotImplementedError("only algorithm code 1 and 2 are supported") - user_password, key = self._authenticateUserPassword(password) - if user_password: - self._decryption_key = key - return 1 - else: - rev = encrypt['/R'].getObject() - if rev == 2: - keylen = 5 - else: - keylen = encrypt['/Length'].getObject() // 8 - key = _alg33_1(password, rev, keylen) - real_O = encrypt["/O"].getObject() - if rev == 2: - userpass = utils.RC4_encrypt(key, real_O) - else: - val = real_O - for i in range(19, -1, -1): - new_key = b_('') - for l in range(len(key)): - new_key += b_(chr(utils.ord_(key[l]) ^ i)) - val = utils.RC4_encrypt(new_key, val) - userpass = val - owner_password, key = self._authenticateUserPassword(userpass) - if owner_password: - self._decryption_key = key - return 2 - return 0 - - def _authenticateUserPassword(self, password): - encrypt = self.trailer['/Encrypt'].getObject() - rev = encrypt['/R'].getObject() - owner_entry = encrypt['/O'].getObject() - p_entry = encrypt['/P'].getObject() - id_entry = self.trailer['/ID'].getObject() - id1_entry = id_entry[0].getObject() - real_U = encrypt['/U'].getObject().original_bytes - if rev == 2: - U, key = _alg34(password, owner_entry, p_entry, id1_entry) - elif rev >= 3: - U, key = _alg35(password, rev, - encrypt["/Length"].getObject() // 8, owner_entry, - p_entry, id1_entry, - encrypt.get("/EncryptMetadata", BooleanObject(False)).getObject()) - U, real_U = U[:16], real_U[:16] - return U == real_U, key - - def getIsEncrypted(self): - return "/Encrypt" in self.trailer - - isEncrypted = property(lambda self: self.getIsEncrypted(), None, None) - """ - Read-only boolean property showing whether this PDF file is encrypted. - Note that this property, if true, will remain true even after the - :meth:`decrypt()` method is called. - """ - - -def getRectangle(self, name, defaults): - retval = self.get(name) - if isinstance(retval, RectangleObject): - return retval - if retval == None: - for d in defaults: - retval = self.get(d) - if retval != None: - break - if isinstance(retval, IndirectObject): - retval = self.pdf.getObject(retval) - retval = RectangleObject(retval) - setRectangle(self, name, retval) - return retval - - -def setRectangle(self, name, value): - if not isinstance(name, NameObject): - name = NameObject(name) - self[name] = value - - -def deleteRectangle(self, name): - del self[name] - - -def createRectangleAccessor(name, fallback): - return \ - property( - lambda self: getRectangle(self, name, fallback), - lambda self, value: setRectangle(self, name, value), - lambda self: deleteRectangle(self, name) - ) - - -class PageObject(DictionaryObject): - """ - This class represents a single page within a PDF file. Typically this - object will be created by accessing the - :meth:`getPage()` method of the - :class:`PdfFileReader` class, but it is - also possible to create an empty page with the - :meth:`createBlankPage()` static method. - - :param pdf: PDF file the page belongs to. 
- :param indirectRef: Stores the original indirect reference to - this object in its source PDF - """ - def __init__(self, pdf=None, indirectRef=None): - DictionaryObject.__init__(self) - self.pdf = pdf - self.indirectRef = indirectRef - - def createBlankPage(pdf=None, width=None, height=None): - """ - Returns a new blank page. - If ``width`` or ``height`` is ``None``, try to get the page size - from the last page of *pdf*. - - :param pdf: PDF file the page belongs to - :param float width: The width of the new page expressed in default user - space units. - :param float height: The height of the new page expressed in default user - space units. - :return: the new blank page: - :rtype: :class:`PageObject` - :raises PageSizeNotDefinedError: if ``pdf`` is ``None`` or contains - no page - """ - page = PageObject(pdf) - - # Creates a new page (cf PDF Reference 7.7.3.3) - page.__setitem__(NameObject('/Type'), NameObject('/Page')) - page.__setitem__(NameObject('/Parent'), NullObject()) - page.__setitem__(NameObject('/Resources'), DictionaryObject()) - if width is None or height is None: - if pdf is not None and pdf.getNumPages() > 0: - lastpage = pdf.getPage(pdf.getNumPages() - 1) - width = lastpage.mediaBox.getWidth() - height = lastpage.mediaBox.getHeight() - else: - raise utils.PageSizeNotDefinedError() - page.__setitem__(NameObject('/MediaBox'), - RectangleObject([0, 0, width, height])) - - return page - createBlankPage = staticmethod(createBlankPage) - - def rotateClockwise(self, angle): - """ - Rotates a page clockwise by increments of 90 degrees. - - :param int angle: Angle to rotate the page. Must be an increment - of 90 deg. - """ - assert angle % 90 == 0 - self._rotate(angle) - return self - - def rotateCounterClockwise(self, angle): - """ - Rotates a page counter-clockwise by increments of 90 degrees. - - :param int angle: Angle to rotate the page. Must be an increment - of 90 deg. - """ - assert angle % 90 == 0 - self._rotate(-angle) - return self - - def _rotate(self, angle): - currentAngle = self.get("/Rotate", 0) - self[NameObject("/Rotate")] = NumberObject(currentAngle + angle) - - def _mergeResources(res1, res2, resource): - newRes = DictionaryObject() - newRes.update(res1.get(resource, DictionaryObject()).getObject()) - page2Res = res2.get(resource, DictionaryObject()).getObject() - renameRes = {} - for key in list(page2Res.keys()): - if key in newRes and newRes.raw_get(key) != page2Res.raw_get(key): - newname = NameObject(key + str(uuid.uuid4())) - renameRes[key] = newname - newRes[newname] = page2Res[key] - elif key not in newRes: - newRes[key] = page2Res.raw_get(key) - return newRes, renameRes - _mergeResources = staticmethod(_mergeResources) - - def _contentStreamRename(stream, rename, pdf): - if not rename: - return stream - stream = ContentStream(stream, pdf) - for operands, operator in stream.operations: - for i in range(len(operands)): - op = operands[i] - if isinstance(op, NameObject): - operands[i] = rename.get(op,op) - return stream - _contentStreamRename = staticmethod(_contentStreamRename) - - def _pushPopGS(contents, pdf): - # adds a graphics state "push" and "pop" to the beginning and end - # of a content stream. This isolates it from changes such as - # transformation matricies. 
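- # ("q" saves the current graphics state before the content and "Q" restores it after)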
- stream = ContentStream(contents, pdf) - stream.operations.insert(0, [[], "q"]) - stream.operations.append([[], "Q"]) - return stream - _pushPopGS = staticmethod(_pushPopGS) - - def _addTransformationMatrix(contents, pdf, ctm): - # adds transformation matrix at the beginning of the given - # contents stream. - a, b, c, d, e, f = ctm - contents = ContentStream(contents, pdf) - contents.operations.insert(0, [[FloatObject(a), FloatObject(b), - FloatObject(c), FloatObject(d), FloatObject(e), - FloatObject(f)], " cm"]) - return contents - _addTransformationMatrix = staticmethod(_addTransformationMatrix) - - def getContents(self): - """ - Accesses the page contents. - - :return: the ``/Contents`` object, or ``None`` if it doesn't exist. - ``/Contents`` is optional, as described in PDF Reference 7.7.3.3 - """ - if "/Contents" in self: - return self["/Contents"].getObject() - else: - return None - - def mergePage(self, page2): - """ - Merges the content streams of two pages into one. Resource references - (i.e. fonts) are maintained from both pages. The mediabox/cropbox/etc - of this page are not altered. The parameter page's content stream will - be added to the end of this page's content stream, meaning that it will - be drawn after, or "on top" of this page. - - :param PageObject page2: The page to be merged into this one. Should be - an instance of :class:`PageObject`. - """ - self._mergePage(page2) - - def _mergePage(self, page2, page2transformation=None, ctm=None, expand=False): - # First we work on merging the resource dictionaries. This allows us - # to find out what symbols in the content streams we might need to - # rename. - - newResources = DictionaryObject() - rename = {} - originalResources = self["/Resources"].getObject() - page2Resources = page2["/Resources"].getObject() - newAnnots = ArrayObject() - - for page in (self, page2): - if "/Annots" in page: - annots = page["/Annots"] - if isinstance(annots, ArrayObject): - for ref in annots: - newAnnots.append(ref) - - for res in "/ExtGState", "/Font", "/XObject", "/ColorSpace", "/Pattern", "/Shading", "/Properties": - new, newrename = PageObject._mergeResources(originalResources, page2Resources, res) - if new: - newResources[NameObject(res)] = new - rename.update(newrename) - - # Combine /ProcSet sets. 
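- # (set union of both pages' procedure sets, so duplicate entries are dropped)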
- newResources[NameObject("/ProcSet")] = ArrayObject( - frozenset(originalResources.get("/ProcSet", ArrayObject()).getObject()).union( - frozenset(page2Resources.get("/ProcSet", ArrayObject()).getObject()) - ) - ) - - newContentArray = ArrayObject() - - originalContent = self.getContents() - if originalContent is not None: - newContentArray.append(PageObject._pushPopGS( - originalContent, self.pdf)) - - page2Content = page2.getContents() - if page2Content is not None: - if page2transformation is not None: - page2Content = page2transformation(page2Content) - page2Content = PageObject._contentStreamRename( - page2Content, rename, self.pdf) - page2Content = PageObject._pushPopGS(page2Content, self.pdf) - newContentArray.append(page2Content) - - # if expanding the page to fit a new page, calculate the new media box size - if expand: - corners1 = [self.mediaBox.getLowerLeft_x().as_numeric(), self.mediaBox.getLowerLeft_y().as_numeric(), - self.mediaBox.getUpperRight_x().as_numeric(), self.mediaBox.getUpperRight_y().as_numeric()] - corners2 = [page2.mediaBox.getLowerLeft_x().as_numeric(), page2.mediaBox.getLowerLeft_y().as_numeric(), - page2.mediaBox.getUpperLeft_x().as_numeric(), page2.mediaBox.getUpperLeft_y().as_numeric(), - page2.mediaBox.getUpperRight_x().as_numeric(), page2.mediaBox.getUpperRight_y().as_numeric(), - page2.mediaBox.getLowerRight_x().as_numeric(), page2.mediaBox.getLowerRight_y().as_numeric()] - if ctm is not None: - ctm = [float(x) for x in ctm] - new_x = [ctm[0]*corners2[i] + ctm[2]*corners2[i+1] + ctm[4] for i in range(0, 8, 2)] - new_y = [ctm[1]*corners2[i] + ctm[3]*corners2[i+1] + ctm[5] for i in range(0, 8, 2)] - else: - new_x = corners2[0:8:2] - new_y = corners2[1:8:2] - lowerleft = [min(new_x), min(new_y)] - upperright = [max(new_x), max(new_y)] - lowerleft = [min(corners1[0], lowerleft[0]), min(corners1[1], lowerleft[1])] - upperright = [max(corners1[2], upperright[0]), max(corners1[3], upperright[1])] - - self.mediaBox.setLowerLeft(lowerleft) - self.mediaBox.setUpperRight(upperright) - - self[NameObject('/Contents')] = ContentStream(newContentArray, self.pdf) - self[NameObject('/Resources')] = newResources - self[NameObject('/Annots')] = newAnnots - - def mergeTransformedPage(self, page2, ctm, expand=False): - """ - This is similar to mergePage, but a transformation matrix is - applied to the merged stream. - - :param PageObject page2: The page to be merged into this one. Should be - an instance of :class:`PageObject`. - :param tuple ctm: a 6-element tuple containing the operands of the - transformation matrix - :param bool expand: Whether the page should be expanded to fit the dimensions - of the page to be merged. - """ - self._mergePage(page2, lambda page2Content: - PageObject._addTransformationMatrix(page2Content, page2.pdf, ctm), ctm, expand) - - def mergeScaledPage(self, page2, scale, expand=False): - """ - This is similar to mergePage, but the stream to be merged is scaled - by appling a transformation matrix. - - :param PageObject page2: The page to be merged into this one. Should be - an instance of :class:`PageObject`. - :param float scale: The scaling factor - :param bool expand: Whether the page should be expanded to fit the - dimensions of the page to be merged. 
- """ - # CTM to scale : [ sx 0 0 sy 0 0 ] - return self.mergeTransformedPage(page2, [scale, 0, - 0, scale, - 0, 0], expand) - - def mergeRotatedPage(self, page2, rotation, expand=False): - """ - This is similar to mergePage, but the stream to be merged is rotated - by appling a transformation matrix. - - :param PageObject page2: the page to be merged into this one. Should be - an instance of :class:`PageObject`. - :param float rotation: The angle of the rotation, in degrees - :param bool expand: Whether the page should be expanded to fit the - dimensions of the page to be merged. - """ - rotation = math.radians(rotation) - return self.mergeTransformedPage(page2, - [math.cos(rotation), math.sin(rotation), - -math.sin(rotation), math.cos(rotation), - 0, 0], expand) - - def mergeTranslatedPage(self, page2, tx, ty, expand=False): - """ - This is similar to mergePage, but the stream to be merged is translated - by appling a transformation matrix. - - :param PageObject page2: the page to be merged into this one. Should be - an instance of :class:`PageObject`. - :param float tx: The translation on X axis - :param float ty: The translation on Y axis - :param bool expand: Whether the page should be expanded to fit the - dimensions of the page to be merged. - """ - return self.mergeTransformedPage(page2, [1, 0, - 0, 1, - tx, ty], expand) - - def mergeRotatedTranslatedPage(self, page2, rotation, tx, ty, expand=False): - """ - This is similar to mergePage, but the stream to be merged is rotated - and translated by appling a transformation matrix. - - :param PageObject page2: the page to be merged into this one. Should be - an instance of :class:`PageObject`. - :param float tx: The translation on X axis - :param float ty: The translation on Y axis - :param float rotation: The angle of the rotation, in degrees - :param bool expand: Whether the page should be expanded to fit the - dimensions of the page to be merged. - """ - - translation = [[1, 0, 0], - [0, 1, 0], - [-tx, -ty, 1]] - rotation = math.radians(rotation) - rotating = [[math.cos(rotation), math.sin(rotation), 0], - [-math.sin(rotation), math.cos(rotation), 0], - [0, 0, 1]] - rtranslation = [[1, 0, 0], - [0, 1, 0], - [tx, ty, 1]] - ctm = utils.matrixMultiply(translation, rotating) - ctm = utils.matrixMultiply(ctm, rtranslation) - - return self.mergeTransformedPage(page2, [ctm[0][0], ctm[0][1], - ctm[1][0], ctm[1][1], - ctm[2][0], ctm[2][1]], expand) - - def mergeRotatedScaledPage(self, page2, rotation, scale, expand=False): - """ - This is similar to mergePage, but the stream to be merged is rotated - and scaled by appling a transformation matrix. - - :param PageObject page2: the page to be merged into this one. Should be - an instance of :class:`PageObject`. - :param float rotation: The angle of the rotation, in degrees - :param float scale: The scaling factor - :param bool expand: Whether the page should be expanded to fit the - dimensions of the page to be merged. 
- """ - rotation = math.radians(rotation) - rotating = [[math.cos(rotation), math.sin(rotation), 0], - [-math.sin(rotation), math.cos(rotation), 0], - [0, 0, 1]] - scaling = [[scale, 0, 0], - [0, scale, 0], - [0, 0, 1]] - ctm = utils.matrixMultiply(rotating, scaling) - - return self.mergeTransformedPage(page2, - [ctm[0][0], ctm[0][1], - ctm[1][0], ctm[1][1], - ctm[2][0], ctm[2][1]], expand) - - def mergeScaledTranslatedPage(self, page2, scale, tx, ty, expand=False): - """ - This is similar to mergePage, but the stream to be merged is translated - and scaled by appling a transformation matrix. - - :param PageObject page2: the page to be merged into this one. Should be - an instance of :class:`PageObject`. - :param float scale: The scaling factor - :param float tx: The translation on X axis - :param float ty: The translation on Y axis - :param bool expand: Whether the page should be expanded to fit the - dimensions of the page to be merged. - """ - - translation = [[1, 0, 0], - [0, 1, 0], - [tx, ty, 1]] - scaling = [[scale, 0, 0], - [0, scale, 0], - [0, 0, 1]] - ctm = utils.matrixMultiply(scaling, translation) - - return self.mergeTransformedPage(page2, [ctm[0][0], ctm[0][1], - ctm[1][0], ctm[1][1], - ctm[2][0], ctm[2][1]], expand) - - def mergeRotatedScaledTranslatedPage(self, page2, rotation, scale, tx, ty, expand=False): - """ - This is similar to mergePage, but the stream to be merged is translated, - rotated and scaled by appling a transformation matrix. - - :param PageObject page2: the page to be merged into this one. Should be - an instance of :class:`PageObject`. - :param float tx: The translation on X axis - :param float ty: The translation on Y axis - :param float rotation: The angle of the rotation, in degrees - :param float scale: The scaling factor - :param bool expand: Whether the page should be expanded to fit the - dimensions of the page to be merged. - """ - translation = [[1, 0, 0], - [0, 1, 0], - [tx, ty, 1]] - rotation = math.radians(rotation) - rotating = [[math.cos(rotation), math.sin(rotation), 0], - [-math.sin(rotation), math.cos(rotation), 0], - [0, 0, 1]] - scaling = [[scale, 0, 0], - [0, scale, 0], - [0, 0, 1]] - ctm = utils.matrixMultiply(rotating, scaling) - ctm = utils.matrixMultiply(ctm, translation) - - return self.mergeTransformedPage(page2, [ctm[0][0], ctm[0][1], - ctm[1][0], ctm[1][1], - ctm[2][0], ctm[2][1]], expand) - - ## - # Applys a transformation matrix the page. - # - # @param ctm A 6 elements tuple containing the operands of the - # transformation matrix - def addTransformation(self, ctm): - """ - Applies a transformation matrix to the page. - - :param tuple ctm: A 6-element tuple containing the operands of the - transformation matrix. - """ - originalContent = self.getContents() - if originalContent is not None: - newContent = PageObject._addTransformationMatrix( - originalContent, self.pdf, ctm) - newContent = PageObject._pushPopGS(newContent, self.pdf) - self[NameObject('/Contents')] = newContent - - def scale(self, sx, sy): - """ - Scales a page by the given factors by appling a transformation - matrix to its content and updating the page size. - - :param float sx: The scaling factor on horizontal axis. - :param float sy: The scaling factor on vertical axis. 
- """ - self.addTransformation([sx, 0, - 0, sy, - 0, 0]) - self.mediaBox = RectangleObject([ - float(self.mediaBox.getLowerLeft_x()) * sx, - float(self.mediaBox.getLowerLeft_y()) * sy, - float(self.mediaBox.getUpperRight_x()) * sx, - float(self.mediaBox.getUpperRight_y()) * sy]) - if "/VP" in self: - viewport = self["/VP"] - if isinstance(viewport, ArrayObject): - bbox = viewport[0]["/BBox"] - else: - bbox = viewport["/BBox"] - scaled_bbox = RectangleObject([ - float(bbox[0]) * sx, - float(bbox[1]) * sy, - float(bbox[2]) * sx, - float(bbox[3]) * sy]) - if isinstance(viewport, ArrayObject): - self[NameObject("/VP")][NumberObject(0)][NameObject("/BBox")] = scaled_bbox - else: - self[NameObject("/VP")][NameObject("/BBox")] = scaled_bbox - - def scaleBy(self, factor): - """ - Scales a page by the given factor by appling a transformation - matrix to its content and updating the page size. - - :param float factor: The scaling factor (for both X and Y axis). - """ - self.scale(factor, factor) - - def scaleTo(self, width, height): - """ - Scales a page to the specified dimentions by appling a - transformation matrix to its content and updating the page size. - - :param float width: The new width. - :param float height: The new heigth. - """ - sx = width / float(self.mediaBox.getUpperRight_x() - - self.mediaBox.getLowerLeft_x ()) - sy = height / float(self.mediaBox.getUpperRight_y() - - self.mediaBox.getLowerLeft_y ()) - self.scale(sx, sy) - - def compressContentStreams(self): - """ - Compresses the size of this page by joining all content streams and - applying a FlateDecode filter. - - However, it is possible that this function will perform no action if - content stream compression becomes "automatic" for some reason. - """ - content = self.getContents() - if content is not None: - if not isinstance(content, ContentStream): - content = ContentStream(content, self.pdf) - self[NameObject("/Contents")] = content.flateEncode() - - def extractText(self): - """ - Locate all text drawing commands, in the order they are provided in the - content stream, and extract the text. This works well for some PDF - files, but poorly for others, depending on the generator used. This will - be refined in the future. Do not rely on the order of text coming out of - this function, as it will change if this function is made more - sophisticated. - - :return: a unicode string object. - """ - text = u_("") - content = self["/Contents"].getObject() - if not isinstance(content, ContentStream): - content = ContentStream(content, self.pdf) - # Note: we check all strings are TextStringObjects. ByteStringObjects - # are strings where the byte->string encoding was unknown, so adding - # them to the text here would be gibberish. 
- for operands, operator in content.operations: - if operator == b_("Tj"): - _text = operands[0] - if isinstance(_text, TextStringObject): - text += _text - elif operator == b_("T*"): - text += "\n" - elif operator == b_("'"): - text += "\n" - _text = operands[0] - if isinstance(_text, TextStringObject): - text += operands[0] - elif operator == b_('"'): - _text = operands[2] - if isinstance(_text, TextStringObject): - text += "\n" - text += _text - elif operator == b_("TJ"): - for i in operands[0]: - if isinstance(i, TextStringObject): - text += i - text += "\n" - return text - - mediaBox = createRectangleAccessor("/MediaBox", ()) - """ - A :class:`RectangleObject`, expressed in default user space units, - defining the boundaries of the physical medium on which the page is - intended to be displayed or printed. - """ - - cropBox = createRectangleAccessor("/CropBox", ("/MediaBox",)) - """ - A :class:`RectangleObject`, expressed in default user space units, - defining the visible region of default user space. When the page is - displayed or printed, its contents are to be clipped (cropped) to this - rectangle and then imposed on the output medium in some - implementation-defined manner. Default value: same as :attr:`mediaBox`. - """ - - bleedBox = createRectangleAccessor("/BleedBox", ("/CropBox", "/MediaBox")) - """ - A :class:`RectangleObject`, expressed in default user space units, - defining the region to which the contents of the page should be clipped - when output in a production enviroment. - """ - - trimBox = createRectangleAccessor("/TrimBox", ("/CropBox", "/MediaBox")) - """ - A :class:`RectangleObject`, expressed in default user space units, - defining the intended dimensions of the finished page after trimming. - """ - - artBox = createRectangleAccessor("/ArtBox", ("/CropBox", "/MediaBox")) - """ - A :class:`RectangleObject`, expressed in default user space units, - defining the extent of the page's meaningful content as intended by the - page's creator. - """ - - -class ContentStream(DecodedStreamObject): - def __init__(self, stream, pdf): - self.pdf = pdf - self.operations = [] - # stream may be a StreamObject or an ArrayObject containing - # multiple StreamObjects to be cat'd together. - stream = stream.getObject() - if isinstance(stream, ArrayObject): - data = b_("") - for s in stream: - data += s.getObject().getData() - stream = BytesIO(b_(data)) - else: - stream = BytesIO(b_(stream.getData())) - self.__parseContentStream(stream) - - def __parseContentStream(self, stream): - # file("f:\\tmp.txt", "w").write(stream.read()) - stream.seek(0, 0) - operands = [] - while True: - peek = readNonWhitespace(stream) - if peek == b_('') or ord_(peek) == 0: - break - stream.seek(-1, 1) - if peek.isalpha() or peek == b_("'") or peek == b_('"'): - operator = utils.readUntilRegex(stream, - NameObject.delimiterPattern, True) - if operator == b_("BI"): - # begin inline image - a completely different parsing - # mechanism is required, of course... thanks buddy... - assert operands == [] - ii = self._readInlineImage(stream) - self.operations.append((ii, b_("INLINE IMAGE"))) - else: - self.operations.append((operands, operator)) - operands = [] - elif peek == b_('%'): - # If we encounter a comment in the content stream, we have to - # handle it here. Typically, readObject will handle - # encountering a comment -- but readObject assumes that - # following the comment must be the object we're trying to - # read. In this case, it could be an operator instead. 
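- # (consume the rest of the comment line up to the next EOL character)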
- while peek not in (b_('\r'), b_('\n')): - peek = stream.read(1) - else: - operands.append(readObject(stream, None)) - - def _readInlineImage(self, stream): - # begin reading just after the "BI" - begin image - # first read the dictionary of settings. - settings = DictionaryObject() - while True: - tok = readNonWhitespace(stream) - stream.seek(-1, 1) - if tok == b_("I"): - # "ID" - begin of image data - break - key = readObject(stream, self.pdf) - tok = readNonWhitespace(stream) - stream.seek(-1, 1) - value = readObject(stream, self.pdf) - settings[key] = value - # left at beginning of ID - tmp = stream.read(3) - assert tmp[:2] == b_("ID") - data = b_("") - while True: - # Read the inline image, while checking for EI (End Image) operator. - tok = stream.read(1) - if tok == b_("E"): - # Check for End Image - tok2 = stream.read(1) - if tok2 == b_("I"): - # Data can contain EI, so check for the Q operator. - tok3 = stream.read(1) - info = tok + tok2 - # We need to find whitespace between EI and Q. - has_q_whitespace = False - while tok3 in utils.WHITESPACES: - has_q_whitespace = True - info += tok3 - tok3 = stream.read(1) - if tok3 == b_("Q") and has_q_whitespace: - stream.seek(-1, 1) - break - else: - stream.seek(-1,1) - data += info - else: - stream.seek(-1, 1) - data += tok - else: - data += tok - return {"settings": settings, "data": data} - - def _getData(self): - newdata = BytesIO() - for operands, operator in self.operations: - if operator == b_("INLINE IMAGE"): - newdata.write(b_("BI")) - dicttext = BytesIO() - operands["settings"].writeToStream(dicttext, None) - newdata.write(dicttext.getvalue()[2:-2]) - newdata.write(b_("ID ")) - newdata.write(operands["data"]) - newdata.write(b_("EI")) - else: - for op in operands: - op.writeToStream(newdata, None) - newdata.write(b_(" ")) - newdata.write(b_(operator)) - newdata.write(b_("\n")) - return newdata.getvalue() - - def _setData(self, value): - self.__parseContentStream(BytesIO(b_(value))) - - _data = property(_getData, _setData) - - -class DocumentInformation(DictionaryObject): - """ - A class representing the basic document metadata provided in a PDF File. - This class is accessible through - :meth:`getDocumentInfo()` - - All text properties of the document metadata have - *two* properties, eg. author and author_raw. The non-raw property will - always return a ``TextStringObject``, making it ideal for a case where - the metadata is being displayed. The raw property can sometimes return - a ``ByteStringObject``, if PyPDF2 was unable to decode the string's - text encoding; this requires additional safety in the caller and - therefore is not as commonly accessed. - """ - - def __init__(self): - DictionaryObject.__init__(self) - - def getText(self, key): - retval = self.get(key, None) - if isinstance(retval, TextStringObject): - return retval - return None - - title = property(lambda self: self.getText("/Title")) - """Read-only property accessing the document's **title**. - Returns a unicode string (``TextStringObject``) or ``None`` - if the title is not specified.""" - title_raw = property(lambda self: self.get("/Title")) - """The "raw" version of title; can return a ``ByteStringObject``.""" - - author = property(lambda self: self.getText("/Author")) - """Read-only property accessing the document's **author**. 
- Returns a unicode string (``TextStringObject``) or ``None``
- if the author is not specified."""
- author_raw = property(lambda self: self.get("/Author"))
- """The "raw" version of author; can return a ``ByteStringObject``."""
-
- subject = property(lambda self: self.getText("/Subject"))
- """Read-only property accessing the document's **subject**.
- Returns a unicode string (``TextStringObject``) or ``None``
- if the subject is not specified."""
- subject_raw = property(lambda self: self.get("/Subject"))
- """The "raw" version of subject; can return a ``ByteStringObject``."""
-
- creator = property(lambda self: self.getText("/Creator"))
- """Read-only property accessing the document's **creator**. If the
- document was converted to PDF from another format, this is the name of the
- application (e.g. OpenOffice) that created the original document from
- which it was converted. Returns a unicode string (``TextStringObject``)
- or ``None`` if the creator is not specified."""
- creator_raw = property(lambda self: self.get("/Creator"))
- """The "raw" version of creator; can return a ``ByteStringObject``."""
-
- producer = property(lambda self: self.getText("/Producer"))
- """Read-only property accessing the document's **producer**.
- If the document was converted to PDF from another format, this is
- the name of the application (for example, OSX Quartz) that converted
- it to PDF. Returns a unicode string (``TextStringObject``)
- or ``None`` if the producer is not specified."""
- producer_raw = property(lambda self: self.get("/Producer"))
- """The "raw" version of producer; can return a ``ByteStringObject``."""
-
-
-def convertToInt(d, size):
- if size > 8:
- raise utils.PdfReadError("invalid size in convertToInt")
- d = b_("\x00\x00\x00\x00\x00\x00\x00\x00") + b_(d)
- d = d[-8:]
- return struct.unpack(">q", d)[0]
-
-# ref: pdf1.8 spec section 3.5.2 algorithm 3.2
-_encryption_padding = b_('\x28\xbf\x4e\x5e\x4e\x75\x8a\x41\x64\x00\x4e\x56') + \
- b_('\xff\xfa\x01\x08\x2e\x2e\x00\xb6\xd0\x68\x3e\x80\x2f\x0c') + \
- b_('\xa9\xfe\x64\x53\x69\x7a')
-
-
-# Implementation of algorithm 3.2 of the PDF standard security handler,
-# section 3.5.2 of the PDF 1.6 reference.
-def _alg32(password, rev, keylen, owner_entry, p_entry, id1_entry, metadata_encrypt=True):
- # 1. Pad or truncate the password string to exactly 32 bytes. If the
- # password string is more than 32 bytes long, use only its first 32 bytes;
- # if it is less than 32 bytes long, pad it by appending the required number
- # of additional bytes from the beginning of the padding string
- # (_encryption_padding).
- password = b_((str_(password) + str_(_encryption_padding))[:32])
- # 2. Initialize the MD5 hash function and pass the result of step 1 as
- # input to this function.
- import struct
- m = md5(password)
- # 3. Pass the value of the encryption dictionary's /O entry to the MD5 hash
- # function.
- m.update(owner_entry.original_bytes)
- # 4. Treat the value of the /P entry as an unsigned 4-byte integer and pass
- # these bytes to the MD5 hash function, low-order byte first.
- p_entry = struct.pack('<i', p_entry)
- m.update(p_entry)
- # 5. Pass the first element of the file's file identifier array (the value
- # of the ID entry in the document's trailer dictionary) to the MD5 hash
- # function.
- m.update(id1_entry.original_bytes)
- # 6. (Revision 3 or greater) If document metadata is not being encrypted,
- # pass 4 bytes with the value 0xFFFFFFFF to the MD5 hash function.
- if rev >= 3 and not metadata_encrypt:
- m.update(b_("\xff\xff\xff\xff"))
- # 7. Finish the hash.
- md5_hash = m.digest()
- # 8. (Revision 3 or greater) Do the following 50 times: Take the output
- # from the previous MD5 hash and pass the first n bytes of the output as
- # input into a new MD5 hash, where n is the number of bytes of the
- # encryption key as defined by the value of the encryption dictionary's
- # /Length entry.
- if rev >= 3: - for i in range(50): - md5_hash = md5(md5_hash[:keylen]).digest() - # 9. Set the encryption key to the first n bytes of the output from the - # final MD5 hash, where n is always 5 for revision 2 but, for revision 3 or - # greater, depends on the value of the encryption dictionary's /Length - # entry. - return md5_hash[:keylen] - - -# Implementation of algorithm 3.3 of the PDF standard security handler, -# section 3.5.2 of the PDF 1.6 reference. -def _alg33(owner_pwd, user_pwd, rev, keylen): - # steps 1 - 4 - key = _alg33_1(owner_pwd, rev, keylen) - # 5. Pad or truncate the user password string as described in step 1 of - # algorithm 3.2. - user_pwd = b_((user_pwd + str_(_encryption_padding))[:32]) - # 6. Encrypt the result of step 5, using an RC4 encryption function with - # the encryption key obtained in step 4. - val = utils.RC4_encrypt(key, user_pwd) - # 7. (Revision 3 or greater) Do the following 19 times: Take the output - # from the previous invocation of the RC4 function and pass it as input to - # a new invocation of the function; use an encryption key generated by - # taking each byte of the encryption key obtained in step 4 and performing - # an XOR operation between that byte and the single-byte value of the - # iteration counter (from 1 to 19). - if rev >= 3: - for i in range(1, 20): - new_key = '' - for l in range(len(key)): - new_key += chr(ord_(key[l]) ^ i) - val = utils.RC4_encrypt(new_key, val) - # 8. Store the output from the final invocation of the RC4 as the value of - # the /O entry in the encryption dictionary. - return val - - -# Steps 1-4 of algorithm 3.3 -def _alg33_1(password, rev, keylen): - # 1. Pad or truncate the owner password string as described in step 1 of - # algorithm 3.2. If there is no owner password, use the user password - # instead. - password = b_((password + str_(_encryption_padding))[:32]) - # 2. Initialize the MD5 hash function and pass the result of step 1 as - # input to this function. - m = md5(password) - # 3. (Revision 3 or greater) Do the following 50 times: Take the output - # from the previous MD5 hash and pass it as input into a new MD5 hash. - md5_hash = m.digest() - if rev >= 3: - for i in range(50): - md5_hash = md5(md5_hash).digest() - # 4. Create an RC4 encryption key using the first n bytes of the output - # from the final MD5 hash, where n is always 5 for revision 2 but, for - # revision 3 or greater, depends on the value of the encryption - # dictionary's /Length entry. - key = md5_hash[:keylen] - return key - - -# Implementation of algorithm 3.4 of the PDF standard security handler, -# section 3.5.2 of the PDF 1.6 reference. -def _alg34(password, owner_entry, p_entry, id1_entry): - # 1. Create an encryption key based on the user password string, as - # described in algorithm 3.2. - key = _alg32(password, 2, 5, owner_entry, p_entry, id1_entry) - # 2. Encrypt the 32-byte padding string shown in step 1 of algorithm 3.2, - # using an RC4 encryption function with the encryption key from the - # preceding step. - U = utils.RC4_encrypt(key, _encryption_padding) - # 3. Store the result of step 2 as the value of the /U entry in the - # encryption dictionary. - return U, key - - -# Implementation of algorithm 3.4 of the PDF standard security handler, -# section 3.5.2 of the PDF 1.6 reference. -def _alg35(password, rev, keylen, owner_entry, p_entry, id1_entry, metadata_encrypt): - # 1. Create an encryption key based on the user password string, as - # described in Algorithm 3.2. 
- key = _alg32(password, rev, keylen, owner_entry, p_entry, id1_entry) - # 2. Initialize the MD5 hash function and pass the 32-byte padding string - # shown in step 1 of Algorithm 3.2 as input to this function. - m = md5() - m.update(_encryption_padding) - # 3. Pass the first element of the file's file identifier array (the value - # of the ID entry in the document's trailer dictionary; see Table 3.13 on - # page 73) to the hash function and finish the hash. (See implementation - # note 25 in Appendix H.) - m.update(id1_entry.original_bytes) - md5_hash = m.digest() - # 4. Encrypt the 16-byte result of the hash, using an RC4 encryption - # function with the encryption key from step 1. - val = utils.RC4_encrypt(key, md5_hash) - # 5. Do the following 19 times: Take the output from the previous - # invocation of the RC4 function and pass it as input to a new invocation - # of the function; use an encryption key generated by taking each byte of - # the original encryption key (obtained in step 2) and performing an XOR - # operation between that byte and the single-byte value of the iteration - # counter (from 1 to 19). - for i in range(1, 20): - new_key = b_('') - for l in range(len(key)): - new_key += b_(chr(ord_(key[l]) ^ i)) - val = utils.RC4_encrypt(new_key, val) - # 6. Append 16 bytes of arbitrary padding to the output from the final - # invocation of the RC4 function and store the 32-byte result as the value - # of the U entry in the encryption dictionary. - # (implementator note: I don't know what "arbitrary padding" is supposed to - # mean, so I have used null bytes. This seems to match a few other - # people's implementations) - return val + (b_('\x00') * 16), key diff --git a/vendor/PyPDF2/utils.py b/vendor/PyPDF2/utils.py deleted file mode 100755 index 718a875c..00000000 --- a/vendor/PyPDF2/utils.py +++ /dev/null @@ -1,295 +0,0 @@ -# Copyright (c) 2006, Mathieu Fenniak -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright notice, -# this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# * The name of the author may not be used to endorse or promote products -# derived from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. - -""" -Utility functions for PDF library. 
-""" -__author__ = "Mathieu Fenniak" -__author_email__ = "biziqe@mathieu.fenniak.net" - - -import sys - -try: - import __builtin__ as builtins -except ImportError: # Py3 - import builtins - - -xrange_fn = getattr(builtins, "xrange", range) -_basestring = getattr(builtins, "basestring", str) - -bytes_type = type(bytes()) # Works the same in Python 2.X and 3.X -string_type = getattr(builtins, "unicode", str) -int_types = (int, long) if sys.version_info[0] < 3 else (int,) - - -# Make basic type tests more consistent -def isString(s): - """Test if arg is a string. Compatible with Python 2 and 3.""" - return isinstance(s, _basestring) - - -def isInt(n): - """Test if arg is an int. Compatible with Python 2 and 3.""" - return isinstance(n, int_types) - - -def isBytes(b): - """Test if arg is a bytes instance. Compatible with Python 2 and 3.""" - return isinstance(b, bytes_type) - - -#custom implementation of warnings.formatwarning -def formatWarning(message, category, filename, lineno, line=None): - file = filename.replace("/", "\\").rsplit("\\", 1)[1] # find the file name - return "%s: %s [%s:%s]\n" % (category.__name__, message, file, lineno) - - -def readUntilWhitespace(stream, maxchars=None): - """ - Reads non-whitespace characters and returns them. - Stops upon encountering whitespace or when maxchars is reached. - """ - txt = b_("") - while True: - tok = stream.read(1) - if tok.isspace() or not tok: - break - txt += tok - if len(txt) == maxchars: - break - return txt - - -def readNonWhitespace(stream): - """ - Finds and reads the next non-whitespace character (ignores whitespace). - """ - tok = WHITESPACES[0] - while tok in WHITESPACES: - tok = stream.read(1) - return tok - - -def skipOverWhitespace(stream): - """ - Similar to readNonWhitespace, but returns a Boolean if more than - one whitespace character was read. - """ - tok = WHITESPACES[0] - cnt = 0; - while tok in WHITESPACES: - tok = stream.read(1) - cnt+=1 - return (cnt > 1) - - -def skipOverComment(stream): - tok = stream.read(1) - stream.seek(-1, 1) - if tok == b_('%'): - while tok not in (b_('\n'), b_('\r')): - tok = stream.read(1) - - -def readUntilRegex(stream, regex, ignore_eof=False): - """ - Reads until the regular expression pattern matched (ignore the match) - Raise PdfStreamError on premature end-of-file. 
- :param bool ignore_eof: If true, ignore end-of-line and return immediately - """ - name = b_('') - while True: - tok = stream.read(16) - if not tok: - # stream has truncated prematurely - if ignore_eof == True: - return name - else: - raise PdfStreamError("Stream has ended unexpectedly") - m = regex.search(tok) - if m is not None: - name += tok[:m.start()] - stream.seek(m.start()-len(tok), 1) - break - name += tok - return name - - -class ConvertFunctionsToVirtualList(object): - def __init__(self, lengthFunction, getFunction): - self.lengthFunction = lengthFunction - self.getFunction = getFunction - - def __len__(self): - return self.lengthFunction() - - def __getitem__(self, index): - if isinstance(index, slice): - indices = xrange_fn(*index.indices(len(self))) - cls = type(self) - return cls(indices.__len__, lambda idx: self[indices[idx]]) - if not isInt(index): - raise TypeError("sequence indices must be integers") - len_self = len(self) - if index < 0: - # support negative indexes - index = len_self + index - if index < 0 or index >= len_self: - raise IndexError("sequence index out of range") - return self.getFunction(index) - - -def RC4_encrypt(key, plaintext): - S = [i for i in range(256)] - j = 0 - for i in range(256): - j = (j + S[i] + ord_(key[i % len(key)])) % 256 - S[i], S[j] = S[j], S[i] - i, j = 0, 0 - retval = b_("") - for x in range(len(plaintext)): - i = (i + 1) % 256 - j = (j + S[i]) % 256 - S[i], S[j] = S[j], S[i] - t = S[(S[i] + S[j]) % 256] - retval += b_(chr(ord_(plaintext[x]) ^ t)) - return retval - - -def matrixMultiply(a, b): - return [[sum([float(i)*float(j) - for i, j in zip(row, col)] - ) for col in zip(*b)] - for row in a] - - -def markLocation(stream): - """Creates text file showing current location in context.""" - # Mainly for debugging - RADIUS = 5000 - stream.seek(-RADIUS, 1) - outputDoc = open('PyPDF2_pdfLocation.txt', 'w') - outputDoc.write(stream.read(RADIUS)) - outputDoc.write('HERE') - outputDoc.write(stream.read(RADIUS)) - outputDoc.close() - stream.seek(-RADIUS, 1) - - -class PyPdfError(Exception): - pass - - -class PdfReadError(PyPdfError): - pass - - -class PageSizeNotDefinedError(PyPdfError): - pass - - -class PdfReadWarning(UserWarning): - pass - - -class PdfStreamError(PdfReadError): - pass - - -if sys.version_info[0] < 3: - def b_(s): - return s -else: - B_CACHE = {} - - def b_(s): - bc = B_CACHE - if s in bc: - return bc[s] - if type(s) == bytes: - return s - else: - r = s.encode('latin-1') - if len(s) < 2: - bc[s] = r - return r - - -def u_(s): - if sys.version_info[0] < 3: - return unicode(s, 'unicode_escape') - else: - return s - - -def str_(b): - if sys.version_info[0] < 3: - return b - else: - if type(b) == bytes: - return b.decode('latin-1') - else: - return b - - -def ord_(b): - if sys.version_info[0] < 3 or type(b) == str: - return ord(b) - else: - return b - - -def chr_(c): - if sys.version_info[0] < 3: - return c - else: - return chr(c) - - -def barray(b): - if sys.version_info[0] < 3: - return b - else: - return bytearray(b) - - -def hexencode(b): - if sys.version_info[0] < 3: - return b.encode('hex') - else: - import codecs - coder = codecs.getencoder('hex_codec') - return coder(b)[0] - - -def hexStr(num): - return hex(num).replace('L', '') - - -WHITESPACES = [b_(x) for x in [' ', '\n', '\r', '\t', '\x00']] diff --git a/vendor/PyPDF2/xmp.py b/vendor/PyPDF2/xmp.py deleted file mode 100755 index 7ba62f0d..00000000 --- a/vendor/PyPDF2/xmp.py +++ /dev/null @@ -1,358 +0,0 @@ -import re -import datetime -import decimal -from .generic 
import PdfObject -from xml.dom import getDOMImplementation -from xml.dom.minidom import parseString -from .utils import u_ - -RDF_NAMESPACE = "http://www.w3.org/1999/02/22-rdf-syntax-ns#" -DC_NAMESPACE = "http://purl.org/dc/elements/1.1/" -XMP_NAMESPACE = "http://ns.adobe.com/xap/1.0/" -PDF_NAMESPACE = "http://ns.adobe.com/pdf/1.3/" -XMPMM_NAMESPACE = "http://ns.adobe.com/xap/1.0/mm/" - -# What is the PDFX namespace, you might ask? I might ask that too. It's -# a completely undocumented namespace used to place "custom metadata" -# properties, which are arbitrary metadata properties with no semantic or -# documented meaning. Elements in the namespace are key/value-style storage, -# where the element name is the key and the content is the value. The keys -# are transformed into valid XML identifiers by substituting an invalid -# identifier character with \u2182 followed by the unicode hex ID of the -# original character. A key like "my car" is therefore "my\u21820020car". -# -# \u2182, in case you're wondering, is the unicode character -# \u{ROMAN NUMERAL TEN THOUSAND}, a straightforward and obvious choice for -# escaping characters. -# -# Intentional users of the pdfx namespace should be shot on sight. A -# custom data schema and sensical XML elements could be used instead, as is -# suggested by Adobe's own documentation on XMP (under "Extensibility of -# Schemas"). -# -# Information presented here on the /pdfx/ schema is a result of limited -# reverse engineering, and does not constitute a full specification. -PDFX_NAMESPACE = "http://ns.adobe.com/pdfx/1.3/" - -iso8601 = re.compile(""" - (?P[0-9]{4}) - (- - (?P[0-9]{2}) - (- - (?P[0-9]+) - (T - (?P[0-9]{2}): - (?P[0-9]{2}) - (:(?P[0-9]{2}(.[0-9]+)?))? - (?PZ|[-+][0-9]{2}:[0-9]{2}) - )? - )? - )? - """, re.VERBOSE) - - -class XmpInformation(PdfObject): - """ - An object that represents Adobe XMP metadata. 
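The long comment above describes how the undocumented `pdfx` namespace escapes characters that are invalid in XML identifiers: U+2182 followed by the four-hex-digit code of the original character, so that "my car" is stored as "my\u21820020car". The removed `custom_properties` accessor further down reverses that escaping; the following standalone sketch (the helper name is mine, not part of PyPDF2) shows the same un-escaping in isolation.

```python
# Un-escape a pdfx "custom metadata" key: U+2182 marks an escaped character,
# and the next four characters are the hex code of the original character.
def unescape_pdfx_key(key: str) -> str:
    marker = "\u2182"  # ROMAN NUMERAL TEN THOUSAND, used as the escape marker
    while True:
        idx = key.find(marker)
        if idx == -1:
            return key
        key = key[:idx] + chr(int(key[idx + 1:idx + 5], 16)) + key[idx + 5:]

assert unescape_pdfx_key("my\u21820020car") == "my car"
```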
- Usually accessed by :meth:`getXmpMetadata()` - """ - - def __init__(self, stream): - self.stream = stream - docRoot = parseString(self.stream.getData()) - self.rdfRoot = docRoot.getElementsByTagNameNS(RDF_NAMESPACE, "RDF")[0] - self.cache = {} - - def writeToStream(self, stream, encryption_key): - self.stream.writeToStream(stream, encryption_key) - - def getElement(self, aboutUri, namespace, name): - for desc in self.rdfRoot.getElementsByTagNameNS(RDF_NAMESPACE, "Description"): - if desc.getAttributeNS(RDF_NAMESPACE, "about") == aboutUri: - attr = desc.getAttributeNodeNS(namespace, name) - if attr != None: - yield attr - for element in desc.getElementsByTagNameNS(namespace, name): - yield element - - def getNodesInNamespace(self, aboutUri, namespace): - for desc in self.rdfRoot.getElementsByTagNameNS(RDF_NAMESPACE, "Description"): - if desc.getAttributeNS(RDF_NAMESPACE, "about") == aboutUri: - for i in range(desc.attributes.length): - attr = desc.attributes.item(i) - if attr.namespaceURI == namespace: - yield attr - for child in desc.childNodes: - if child.namespaceURI == namespace: - yield child - - def _getText(self, element): - text = "" - for child in element.childNodes: - if child.nodeType == child.TEXT_NODE: - text += child.data - return text - - def _converter_string(value): - return value - - def _converter_date(value): - m = iso8601.match(value) - year = int(m.group("year")) - month = int(m.group("month") or "1") - day = int(m.group("day") or "1") - hour = int(m.group("hour") or "0") - minute = int(m.group("minute") or "0") - second = decimal.Decimal(m.group("second") or "0") - seconds = second.to_integral(decimal.ROUND_FLOOR) - milliseconds = (second - seconds) * 1000000 - tzd = m.group("tzd") or "Z" - dt = datetime.datetime(year, month, day, hour, minute, seconds, milliseconds) - if tzd != "Z": - tzd_hours, tzd_minutes = [int(x) for x in tzd.split(":")] - tzd_hours *= -1 - if tzd_hours < 0: - tzd_minutes *= -1 - dt = dt + datetime.timedelta(hours=tzd_hours, minutes=tzd_minutes) - return dt - _test_converter_date = staticmethod(_converter_date) - - def _getter_bag(namespace, name, converter): - def get(self): - cached = self.cache.get(namespace, {}).get(name) - if cached: - return cached - retval = [] - for element in self.getElement("", namespace, name): - bags = element.getElementsByTagNameNS(RDF_NAMESPACE, "Bag") - if len(bags): - for bag in bags: - for item in bag.getElementsByTagNameNS(RDF_NAMESPACE, "li"): - value = self._getText(item) - value = converter(value) - retval.append(value) - ns_cache = self.cache.setdefault(namespace, {}) - ns_cache[name] = retval - return retval - return get - - def _getter_seq(namespace, name, converter): - def get(self): - cached = self.cache.get(namespace, {}).get(name) - if cached: - return cached - retval = [] - for element in self.getElement("", namespace, name): - seqs = element.getElementsByTagNameNS(RDF_NAMESPACE, "Seq") - if len(seqs): - for seq in seqs: - for item in seq.getElementsByTagNameNS(RDF_NAMESPACE, "li"): - value = self._getText(item) - value = converter(value) - retval.append(value) - else: - value = converter(self._getText(element)) - retval.append(value) - ns_cache = self.cache.setdefault(namespace, {}) - ns_cache[name] = retval - return retval - return get - - def _getter_langalt(namespace, name, converter): - def get(self): - cached = self.cache.get(namespace, {}).get(name) - if cached: - return cached - retval = {} - for element in self.getElement("", namespace, name): - alts = 
element.getElementsByTagNameNS(RDF_NAMESPACE, "Alt") - if len(alts): - for alt in alts: - for item in alt.getElementsByTagNameNS(RDF_NAMESPACE, "li"): - value = self._getText(item) - value = converter(value) - retval[item.getAttribute("xml:lang")] = value - else: - retval["x-default"] = converter(self._getText(element)) - ns_cache = self.cache.setdefault(namespace, {}) - ns_cache[name] = retval - return retval - return get - - def _getter_single(namespace, name, converter): - def get(self): - cached = self.cache.get(namespace, {}).get(name) - if cached: - return cached - value = None - for element in self.getElement("", namespace, name): - if element.nodeType == element.ATTRIBUTE_NODE: - value = element.nodeValue - else: - value = self._getText(element) - break - if value != None: - value = converter(value) - ns_cache = self.cache.setdefault(namespace, {}) - ns_cache[name] = value - return value - return get - - dc_contributor = property(_getter_bag(DC_NAMESPACE, "contributor", _converter_string)) - """ - Contributors to the resource (other than the authors). An unsorted - array of names. - """ - - dc_coverage = property(_getter_single(DC_NAMESPACE, "coverage", _converter_string)) - """ - Text describing the extent or scope of the resource. - """ - - dc_creator = property(_getter_seq(DC_NAMESPACE, "creator", _converter_string)) - """ - A sorted array of names of the authors of the resource, listed in order - of precedence. - """ - - dc_date = property(_getter_seq(DC_NAMESPACE, "date", _converter_date)) - """ - A sorted array of dates (datetime.datetime instances) of signifigance to - the resource. The dates and times are in UTC. - """ - - dc_description = property(_getter_langalt(DC_NAMESPACE, "description", _converter_string)) - """ - A language-keyed dictionary of textual descriptions of the content of the - resource. - """ - - dc_format = property(_getter_single(DC_NAMESPACE, "format", _converter_string)) - """ - The mime-type of the resource. - """ - - dc_identifier = property(_getter_single(DC_NAMESPACE, "identifier", _converter_string)) - """ - Unique identifier of the resource. - """ - - dc_language = property(_getter_bag(DC_NAMESPACE, "language", _converter_string)) - """ - An unordered array specifying the languages used in the resource. - """ - - dc_publisher = property(_getter_bag(DC_NAMESPACE, "publisher", _converter_string)) - """ - An unordered array of publisher names. - """ - - dc_relation = property(_getter_bag(DC_NAMESPACE, "relation", _converter_string)) - """ - An unordered array of text descriptions of relationships to other - documents. - """ - - dc_rights = property(_getter_langalt(DC_NAMESPACE, "rights", _converter_string)) - """ - A language-keyed dictionary of textual descriptions of the rights the - user has to this resource. - """ - - dc_source = property(_getter_single(DC_NAMESPACE, "source", _converter_string)) - """ - Unique identifier of the work from which this resource was derived. - """ - - dc_subject = property(_getter_bag(DC_NAMESPACE, "subject", _converter_string)) - """ - An unordered array of descriptive phrases or keywrods that specify the - topic of the content of the resource. - """ - - dc_title = property(_getter_langalt(DC_NAMESPACE, "title", _converter_string)) - """ - A language-keyed dictionary of the title of the resource. - """ - - dc_type = property(_getter_bag(DC_NAMESPACE, "type", _converter_string)) - """ - An unordered array of textual descriptions of the document type. 
- """ - - pdf_keywords = property(_getter_single(PDF_NAMESPACE, "Keywords", _converter_string)) - """ - An unformatted text string representing document keywords. - """ - - pdf_pdfversion = property(_getter_single(PDF_NAMESPACE, "PDFVersion", _converter_string)) - """ - The PDF file version, for example 1.0, 1.3. - """ - - pdf_producer = property(_getter_single(PDF_NAMESPACE, "Producer", _converter_string)) - """ - The name of the tool that created the PDF document. - """ - - xmp_createDate = property(_getter_single(XMP_NAMESPACE, "CreateDate", _converter_date)) - """ - The date and time the resource was originally created. The date and - time are returned as a UTC datetime.datetime object. - """ - - xmp_modifyDate = property(_getter_single(XMP_NAMESPACE, "ModifyDate", _converter_date)) - """ - The date and time the resource was last modified. The date and time - are returned as a UTC datetime.datetime object. - """ - - xmp_metadataDate = property(_getter_single(XMP_NAMESPACE, "MetadataDate", _converter_date)) - """ - The date and time that any metadata for this resource was last - changed. The date and time are returned as a UTC datetime.datetime - object. - """ - - xmp_creatorTool = property(_getter_single(XMP_NAMESPACE, "CreatorTool", _converter_string)) - """ - The name of the first known tool used to create the resource. - """ - - xmpmm_documentId = property(_getter_single(XMPMM_NAMESPACE, "DocumentID", _converter_string)) - """ - The common identifier for all versions and renditions of this resource. - """ - - xmpmm_instanceId = property(_getter_single(XMPMM_NAMESPACE, "InstanceID", _converter_string)) - """ - An identifier for a specific incarnation of a document, updated each - time a file is saved. - """ - - def custom_properties(self): - if not hasattr(self, "_custom_properties"): - self._custom_properties = {} - for node in self.getNodesInNamespace("", PDFX_NAMESPACE): - key = node.localName - while True: - # see documentation about PDFX_NAMESPACE earlier in file - idx = key.find(u_("\u2182")) - if idx == -1: - break - key = key[:idx] + chr(int(key[idx+1:idx+5], base=16)) + key[idx+5:] - if node.nodeType == node.ATTRIBUTE_NODE: - value = node.nodeValue - else: - value = self._getText(node) - self._custom_properties[key] = value - return self._custom_properties - - custom_properties = property(custom_properties) - """ - Retrieves custom metadata properties defined in the undocumented pdfx - metadata schema. - - :return: a dictionary of key/value items for custom metadata properties. - :rtype: dict - """ diff --git a/vendor/_version.py b/vendor/_version.py deleted file mode 100644 index 6d013711..00000000 --- a/vendor/_version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = '5.0.6' \ No newline at end of file diff --git a/vendor/babel/AUTHORS b/vendor/babel/AUTHORS deleted file mode 100644 index 09d0bc03..00000000 --- a/vendor/babel/AUTHORS +++ /dev/null @@ -1,28 +0,0 @@ -Babel is written and maintained by the Babel team and various contributors: - -Maintainer and Current Project Lead: - -- Armin Ronacher - -Contributors: - -- Christopher Lenz -- Alex Morega -- Felix Schwarz -- Pedro Algarvio -- Jeroen Ruigrok van der Werven -- Philip Jenvey -- Tobias Bieniek -- Jonas Borgström -- Daniel Neuhäuser -- Nick Retallack -- Thomas Waldmann -- Lennart Regebro - -Babel was previously developed under the Copyright of Edgewall Software. 
The -following copyright notice holds true for releases before 2013: "Copyright (c) -2007 - 2011 by Edgewall Software" - -In addition to the regular contributions Babel includes a fork of Lennart -Regebro's tzlocal that originally was licensed under the CC0 license. The -original copyright of that project is "Copyright 2013 by Lennart Regebro". diff --git a/vendor/babel/LICENSE b/vendor/babel/LICENSE deleted file mode 100644 index 1f1f55b6..00000000 --- a/vendor/babel/LICENSE +++ /dev/null @@ -1,29 +0,0 @@ -Copyright (C) 2013 by the Babel Team, see AUTHORS for more information. - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - 3. The name of the author may not be used to endorse or promote - products derived from this software without specific prior - written permission. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS -OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE -GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER -IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR -OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN -IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/babel/__init__.py b/vendor/babel/__init__.py deleted file mode 100644 index dd9f17e0..00000000 --- a/vendor/babel/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -# -*- coding: utf-8 -*- -""" - babel - ~~~~~ - - Integrated collection of utilities that assist in internationalizing and - localizing applications. - - This package is basically composed of two major parts: - - * tools to build and work with ``gettext`` message catalogs - * a Python interface to the CLDR (Common Locale Data Repository), providing - access to various locale display names, localized number and date - formatting, etc. - - :copyright: (c) 2013 by the Babel Team. - :license: BSD, see LICENSE for more details. 
-""" - -from babel.core import UnknownLocaleError, Locale, default_locale, \ - negotiate_locale, parse_locale, get_locale_identifier - - -__version__ = '1.3' diff --git a/vendor/babel/_compat.py b/vendor/babel/_compat.py deleted file mode 100644 index 86096daa..00000000 --- a/vendor/babel/_compat.py +++ /dev/null @@ -1,51 +0,0 @@ -import sys - -PY2 = sys.version_info[0] == 2 - -_identity = lambda x: x - - -if not PY2: - text_type = str - string_types = (str,) - integer_types = (int, ) - unichr = chr - - text_to_native = lambda s, enc: s - - iterkeys = lambda d: iter(d.keys()) - itervalues = lambda d: iter(d.values()) - iteritems = lambda d: iter(d.items()) - - from io import StringIO, BytesIO - import pickle - - izip = zip - imap = map - range_type = range - - cmp = lambda a, b: (a > b) - (a < b) - -else: - text_type = unicode - string_types = (str, unicode) - integer_types = (int, long) - - text_to_native = lambda s, enc: s.encode(enc) - unichr = unichr - - iterkeys = lambda d: d.iterkeys() - itervalues = lambda d: d.itervalues() - iteritems = lambda d: d.iteritems() - - from cStringIO import StringIO as BytesIO - from StringIO import StringIO - import cPickle as pickle - - from itertools import izip, imap - range_type = xrange - - cmp = cmp - - -number_types = integer_types + (float,) diff --git a/vendor/babel/core.py b/vendor/babel/core.py deleted file mode 100644 index 6e6e6d61..00000000 --- a/vendor/babel/core.py +++ /dev/null @@ -1,941 +0,0 @@ -# -*- coding: utf-8 -*- -""" - babel.core - ~~~~~~~~~~ - - Core locale representation and locale data access. - - :copyright: (c) 2013 by the Babel Team. - :license: BSD, see LICENSE for more details. -""" - -import os - -from babel import localedata -from babel._compat import pickle, string_types - -__all__ = ['UnknownLocaleError', 'Locale', 'default_locale', 'negotiate_locale', - 'parse_locale'] - - -_global_data = None - - -def _raise_no_data_error(): - raise RuntimeError('The babel data files are not available. ' - 'This usually happens because you are using ' - 'a source checkout from Babel and you did ' - 'not build the data files. Just make sure ' - 'to run "python setup.py import_cldr" before ' - 'installing the library.') - - -def get_global(key): - """Return the dictionary for the given key in the global data. - - The global data is stored in the ``babel/global.dat`` file and contains - information independent of individual locales. - - >>> get_global('zone_aliases')['UTC'] - u'Etc/GMT' - >>> get_global('zone_territories')['Europe/Berlin'] - u'DE' - - .. 
versionadded:: 0.9 - - :param key: the data key - """ - global _global_data - if _global_data is None: - dirname = os.path.join(os.path.dirname(__file__)) - filename = os.path.join(dirname, 'global.dat') - if not os.path.isfile(filename): - _raise_no_data_error() - fileobj = open(filename, 'rb') - try: - _global_data = pickle.load(fileobj) - finally: - fileobj.close() - return _global_data.get(key, {}) - - -LOCALE_ALIASES = { - 'ar': 'ar_SY', 'bg': 'bg_BG', 'bs': 'bs_BA', 'ca': 'ca_ES', 'cs': 'cs_CZ', - 'da': 'da_DK', 'de': 'de_DE', 'el': 'el_GR', 'en': 'en_US', 'es': 'es_ES', - 'et': 'et_EE', 'fa': 'fa_IR', 'fi': 'fi_FI', 'fr': 'fr_FR', 'gl': 'gl_ES', - 'he': 'he_IL', 'hu': 'hu_HU', 'id': 'id_ID', 'is': 'is_IS', 'it': 'it_IT', - 'ja': 'ja_JP', 'km': 'km_KH', 'ko': 'ko_KR', 'lt': 'lt_LT', 'lv': 'lv_LV', - 'mk': 'mk_MK', 'nl': 'nl_NL', 'nn': 'nn_NO', 'no': 'nb_NO', 'pl': 'pl_PL', - 'pt': 'pt_PT', 'ro': 'ro_RO', 'ru': 'ru_RU', 'sk': 'sk_SK', 'sl': 'sl_SI', - 'sv': 'sv_SE', 'th': 'th_TH', 'tr': 'tr_TR', 'uk': 'uk_UA' -} - - -class UnknownLocaleError(Exception): - """Exception thrown when a locale is requested for which no locale data - is available. - """ - - def __init__(self, identifier): - """Create the exception. - - :param identifier: the identifier string of the unsupported locale - """ - Exception.__init__(self, 'unknown locale %r' % identifier) - - #: The identifier of the locale that could not be found. - self.identifier = identifier - - -class Locale(object): - """Representation of a specific locale. - - >>> locale = Locale('en', 'US') - >>> repr(locale) - "Locale('en', territory='US')" - >>> locale.display_name - u'English (United States)' - - A `Locale` object can also be instantiated from a raw locale string: - - >>> locale = Locale.parse('en-US', sep='-') - >>> repr(locale) - "Locale('en', territory='US')" - - `Locale` objects provide access to a collection of locale data, such as - territory and language names, number and date format patterns, and more: - - >>> locale.number_symbols['decimal'] - u'.' - - If a locale is requested for which no locale data is available, an - `UnknownLocaleError` is raised: - - >>> Locale.parse('en_DE') - Traceback (most recent call last): - ... - UnknownLocaleError: unknown locale 'en_DE' - - For more information see :rfc:`3066`. - """ - - def __init__(self, language, territory=None, script=None, variant=None): - """Initialize the locale object from the given identifier components. - - >>> locale = Locale('en', 'US') - >>> locale.language - 'en' - >>> locale.territory - 'US' - - :param language: the language code - :param territory: the territory (country or region) code - :param script: the script code - :param variant: the variant code - :raise `UnknownLocaleError`: if no locale data is available for the - requested locale - """ - #: the language code - self.language = language - #: the territory (country or region) code - self.territory = territory - #: the script code - self.script = script - #: the variant code - self.variant = variant - self.__data = None - - identifier = str(self) - if not localedata.exists(identifier): - raise UnknownLocaleError(identifier) - - @classmethod - def default(cls, category=None, aliases=LOCALE_ALIASES): - """Return the system default locale for the specified category. - - >>> for name in ['LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LC_MESSAGES']: - ... 
os.environ[name] = '' - >>> os.environ['LANG'] = 'fr_FR.UTF-8' - >>> Locale.default('LC_MESSAGES') - Locale('fr', territory='FR') - - The following fallbacks to the variable are always considered: - - - ``LANGUAGE`` - - ``LC_ALL`` - - ``LC_CTYPE`` - - ``LANG`` - - :param category: one of the ``LC_XXX`` environment variable names - :param aliases: a dictionary of aliases for locale identifiers - """ - # XXX: use likely subtag expansion here instead of the - # aliases dictionary. - locale_string = default_locale(category, aliases=aliases) - return cls.parse(locale_string) - - @classmethod - def negotiate(cls, preferred, available, sep='_', aliases=LOCALE_ALIASES): - """Find the best match between available and requested locale strings. - - >>> Locale.negotiate(['de_DE', 'en_US'], ['de_DE', 'de_AT']) - Locale('de', territory='DE') - >>> Locale.negotiate(['de_DE', 'en_US'], ['en', 'de']) - Locale('de') - >>> Locale.negotiate(['de_DE', 'de'], ['en_US']) - - You can specify the character used in the locale identifiers to separate - the differnet components. This separator is applied to both lists. Also, - case is ignored in the comparison: - - >>> Locale.negotiate(['de-DE', 'de'], ['en-us', 'de-de'], sep='-') - Locale('de', territory='DE') - - :param preferred: the list of locale identifers preferred by the user - :param available: the list of locale identifiers available - :param aliases: a dictionary of aliases for locale identifiers - """ - identifier = negotiate_locale(preferred, available, sep=sep, - aliases=aliases) - if identifier: - return Locale.parse(identifier, sep=sep) - - @classmethod - def parse(cls, identifier, sep='_', resolve_likely_subtags=True): - """Create a `Locale` instance for the given locale identifier. - - >>> l = Locale.parse('de-DE', sep='-') - >>> l.display_name - u'Deutsch (Deutschland)' - - If the `identifier` parameter is not a string, but actually a `Locale` - object, that object is returned: - - >>> Locale.parse(l) - Locale('de', territory='DE') - - This also can perform resolving of likely subtags which it does - by default. This is for instance useful to figure out the most - likely locale for a territory you can use ``'und'`` as the - language tag: - - >>> Locale.parse('und_AT') - Locale('de', territory='AT') - - :param identifier: the locale identifier string - :param sep: optional component separator - :param resolve_likely_subtags: if this is specified then a locale will - have its likely subtag resolved if the - locale otherwise does not exist. For - instance ``zh_TW`` by itself is not a - locale that exists but Babel can - automatically expand it to the full - form of ``zh_hant_TW``. Note that this - expansion is only taking place if no - locale exists otherwise. For instance - there is a locale ``en`` that can exist - by itself. - :raise `ValueError`: if the string does not appear to be a valid locale - identifier - :raise `UnknownLocaleError`: if no locale data is available for the - requested locale - """ - if identifier is None: - return None - elif isinstance(identifier, Locale): - return identifier - elif not isinstance(identifier, string_types): - raise TypeError('Unxpected value for identifier: %r' % (identifier,)) - - parts = parse_locale(identifier, sep=sep) - input_id = get_locale_identifier(parts) - - def _try_load(parts): - try: - return cls(*parts) - except UnknownLocaleError: - return None - - def _try_load_reducing(parts): - # Success on first hit, return it. 
- locale = _try_load(parts) - if locale is not None: - return locale - - # Now try without script and variant - locale = _try_load(parts[:2]) - if locale is not None: - return locale - - locale = _try_load(parts) - if locale is not None: - return locale - if not resolve_likely_subtags: - raise UnknownLocaleError(input_id) - - # From here onwards is some very bad likely subtag resolving. This - # whole logic is not entirely correct but good enough (tm) for the - # time being. This has been added so that zh_TW does not cause - # errors for people when they upgrade. Later we should properly - # implement ICU like fuzzy locale objects and provide a way to - # maximize and minimize locale tags. - - language, territory, script, variant = parts - language = get_global('language_aliases').get(language, language) - territory = get_global('territory_aliases').get(territory, territory) - script = get_global('script_aliases').get(script, script) - variant = get_global('variant_aliases').get(variant, variant) - - if territory == 'ZZ': - territory = None - if script == 'Zzzz': - script = None - - parts = language, territory, script, variant - - # First match: try the whole identifier - new_id = get_locale_identifier(parts) - likely_subtag = get_global('likely_subtags').get(new_id) - if likely_subtag is not None: - locale = _try_load_reducing(parse_locale(likely_subtag)) - if locale is not None: - return locale - - # If we did not find anything so far, try again with a - # simplified identifier that is just the language - likely_subtag = get_global('likely_subtags').get(language) - if likely_subtag is not None: - language2, _, script2, variant2 = parse_locale(likely_subtag) - locale = _try_load_reducing((language2, territory, script2, variant2)) - if locale is not None: - return locale - - raise UnknownLocaleError(input_id) - - def __eq__(self, other): - for key in ('language', 'territory', 'script', 'variant'): - if not hasattr(other, key): - return False - return (self.language == other.language) and \ - (self.territory == other.territory) and \ - (self.script == other.script) and \ - (self.variant == other.variant) - - def __ne__(self, other): - return not self.__eq__(other) - - def __repr__(self): - parameters = [''] - for key in ('territory', 'script', 'variant'): - value = getattr(self, key) - if value is not None: - parameters.append('%s=%r' % (key, value)) - parameter_string = '%r' % self.language + ', '.join(parameters) - return 'Locale(%s)' % parameter_string - - def __str__(self): - return get_locale_identifier((self.language, self.territory, - self.script, self.variant)) - - @property - def _data(self): - if self.__data is None: - self.__data = localedata.LocaleDataDict(localedata.load(str(self))) - return self.__data - - def get_display_name(self, locale=None): - """Return the display name of the locale using the given locale. - - The display name will include the language, territory, script, and - variant, if those are specified. 
- - >>> Locale('zh', 'CN', script='Hans').get_display_name('en') - u'Chinese (Simplified, China)' - - :param locale: the locale to use - """ - if locale is None: - locale = self - locale = Locale.parse(locale) - retval = locale.languages.get(self.language) - if self.territory or self.script or self.variant: - details = [] - if self.script: - details.append(locale.scripts.get(self.script)) - if self.territory: - details.append(locale.territories.get(self.territory)) - if self.variant: - details.append(locale.variants.get(self.variant)) - details = filter(None, details) - if details: - retval += ' (%s)' % u', '.join(details) - return retval - - display_name = property(get_display_name, doc="""\ - The localized display name of the locale. - - >>> Locale('en').display_name - u'English' - >>> Locale('en', 'US').display_name - u'English (United States)' - >>> Locale('sv').display_name - u'svenska' - - :type: `unicode` - """) - - def get_language_name(self, locale=None): - """Return the language of this locale in the given locale. - - >>> Locale('zh', 'CN', script='Hans').get_language_name('de') - u'Chinesisch' - - .. versionadded:: 1.0 - - :param locale: the locale to use - """ - if locale is None: - locale = self - locale = Locale.parse(locale) - return locale.languages.get(self.language) - - language_name = property(get_language_name, doc="""\ - The localized language name of the locale. - - >>> Locale('en', 'US').language_name - u'English' - """) - - def get_territory_name(self, locale=None): - """Return the territory name in the given locale.""" - if locale is None: - locale = self - locale = Locale.parse(locale) - return locale.territories.get(self.territory) - - territory_name = property(get_territory_name, doc="""\ - The localized territory name of the locale if available. - - >>> Locale('de', 'DE').territory_name - u'Deutschland' - """) - - def get_script_name(self, locale=None): - """Return the script name in the given locale.""" - if locale is None: - locale = self - locale = Locale.parse(locale) - return locale.scripts.get(self.script) - - script_name = property(get_script_name, doc="""\ - The localized script name of the locale if available. - - >>> Locale('ms', 'SG', script='Latn').script_name - u'Latin' - """) - - @property - def english_name(self): - """The english display name of the locale. - - >>> Locale('de').english_name - u'German' - >>> Locale('de', 'DE').english_name - u'German (Germany)' - - :type: `unicode`""" - return self.get_display_name(Locale('en')) - - #{ General Locale Display Names - - @property - def languages(self): - """Mapping of language codes to translated language names. - - >>> Locale('de', 'DE').languages['ja'] - u'Japanisch' - - See `ISO 639 `_ for - more information. - """ - return self._data['languages'] - - @property - def scripts(self): - """Mapping of script codes to translated script names. - - >>> Locale('en', 'US').scripts['Hira'] - u'Hiragana' - - See `ISO 15924 `_ - for more information. - """ - return self._data['scripts'] - - @property - def territories(self): - """Mapping of script codes to translated script names. - - >>> Locale('es', 'CO').territories['DE'] - u'Alemania' - - See `ISO 3166 `_ - for more information. - """ - return self._data['territories'] - - @property - def variants(self): - """Mapping of script codes to translated script names. 
- - >>> Locale('de', 'DE').variants['1901'] - u'Alte deutsche Rechtschreibung' - """ - return self._data['variants'] - - #{ Number Formatting - - @property - def currencies(self): - """Mapping of currency codes to translated currency names. This - only returns the generic form of the currency name, not the count - specific one. If an actual number is requested use the - :func:`babel.numbers.get_currency_name` function. - - >>> Locale('en').currencies['COP'] - u'Colombian Peso' - >>> Locale('de', 'DE').currencies['COP'] - u'Kolumbianischer Peso' - """ - return self._data['currency_names'] - - @property - def currency_symbols(self): - """Mapping of currency codes to symbols. - - >>> Locale('en', 'US').currency_symbols['USD'] - u'$' - >>> Locale('es', 'CO').currency_symbols['USD'] - u'US$' - """ - return self._data['currency_symbols'] - - @property - def number_symbols(self): - """Symbols used in number formatting. - - >>> Locale('fr', 'FR').number_symbols['decimal'] - u',' - """ - return self._data['number_symbols'] - - @property - def decimal_formats(self): - """Locale patterns for decimal number formatting. - - >>> Locale('en', 'US').decimal_formats[None] - - """ - return self._data['decimal_formats'] - - @property - def currency_formats(self): - """Locale patterns for currency number formatting. - - >>> print Locale('en', 'US').currency_formats[None] - - """ - return self._data['currency_formats'] - - @property - def percent_formats(self): - """Locale patterns for percent number formatting. - - >>> Locale('en', 'US').percent_formats[None] - - """ - return self._data['percent_formats'] - - @property - def scientific_formats(self): - """Locale patterns for scientific number formatting. - - >>> Locale('en', 'US').scientific_formats[None] - - """ - return self._data['scientific_formats'] - - #{ Calendar Information and Date Formatting - - @property - def periods(self): - """Locale display names for day periods (AM/PM). - - >>> Locale('en', 'US').periods['am'] - u'AM' - """ - return self._data['periods'] - - @property - def days(self): - """Locale display names for weekdays. - - >>> Locale('de', 'DE').days['format']['wide'][3] - u'Donnerstag' - """ - return self._data['days'] - - @property - def months(self): - """Locale display names for months. - - >>> Locale('de', 'DE').months['format']['wide'][10] - u'Oktober' - """ - return self._data['months'] - - @property - def quarters(self): - """Locale display names for quarters. - - >>> Locale('de', 'DE').quarters['format']['wide'][1] - u'1. Quartal' - """ - return self._data['quarters'] - - @property - def eras(self): - """Locale display names for eras. - - >>> Locale('en', 'US').eras['wide'][1] - u'Anno Domini' - >>> Locale('en', 'US').eras['abbreviated'][0] - u'BC' - """ - return self._data['eras'] - - @property - def time_zones(self): - """Locale display names for time zones. - - >>> Locale('en', 'US').time_zones['Europe/London']['long']['daylight'] - u'British Summer Time' - >>> Locale('en', 'US').time_zones['America/St_Johns']['city'] - u'St. John\u2019s' - """ - return self._data['time_zones'] - - @property - def meta_zones(self): - """Locale display names for meta time zones. - - Meta time zones are basically groups of different Olson time zones that - have the same GMT offset and daylight savings time. - - >>> Locale('en', 'US').meta_zones['Europe_Central']['long']['daylight'] - u'Central European Summer Time' - - .. 
versionadded:: 0.9 - """ - return self._data['meta_zones'] - - @property - def zone_formats(self): - """Patterns related to the formatting of time zones. - - >>> Locale('en', 'US').zone_formats['fallback'] - u'%(1)s (%(0)s)' - >>> Locale('pt', 'BR').zone_formats['region'] - u'Hor\\xe1rio %s' - - .. versionadded:: 0.9 - """ - return self._data['zone_formats'] - - @property - def first_week_day(self): - """The first day of a week, with 0 being Monday. - - >>> Locale('de', 'DE').first_week_day - 0 - >>> Locale('en', 'US').first_week_day - 6 - """ - return self._data['week_data']['first_day'] - - @property - def weekend_start(self): - """The day the weekend starts, with 0 being Monday. - - >>> Locale('de', 'DE').weekend_start - 5 - """ - return self._data['week_data']['weekend_start'] - - @property - def weekend_end(self): - """The day the weekend ends, with 0 being Monday. - - >>> Locale('de', 'DE').weekend_end - 6 - """ - return self._data['week_data']['weekend_end'] - - @property - def min_week_days(self): - """The minimum number of days in a week so that the week is counted as - the first week of a year or month. - - >>> Locale('de', 'DE').min_week_days - 4 - """ - return self._data['week_data']['min_days'] - - @property - def date_formats(self): - """Locale patterns for date formatting. - - >>> Locale('en', 'US').date_formats['short'] - - >>> Locale('fr', 'FR').date_formats['long'] - - """ - return self._data['date_formats'] - - @property - def time_formats(self): - """Locale patterns for time formatting. - - >>> Locale('en', 'US').time_formats['short'] - - >>> Locale('fr', 'FR').time_formats['long'] - - """ - return self._data['time_formats'] - - @property - def datetime_formats(self): - """Locale patterns for datetime formatting. - - >>> Locale('en').datetime_formats['full'] - u"{1} 'at' {0}" - >>> Locale('th').datetime_formats['medium'] - u'{1}, {0}' - """ - return self._data['datetime_formats'] - - @property - def plural_form(self): - """Plural rules for the locale. - - >>> Locale('en').plural_form(1) - 'one' - >>> Locale('en').plural_form(0) - 'other' - >>> Locale('fr').plural_form(0) - 'one' - >>> Locale('ru').plural_form(100) - 'many' - """ - return self._data['plural_form'] - - -def default_locale(category=None, aliases=LOCALE_ALIASES): - """Returns the system default locale for a given category, based on - environment variables. - - >>> for name in ['LANGUAGE', 'LC_ALL', 'LC_CTYPE']: - ... 
os.environ[name] = '' - >>> os.environ['LANG'] = 'fr_FR.UTF-8' - >>> default_locale('LC_MESSAGES') - 'fr_FR' - - The "C" or "POSIX" pseudo-locales are treated as aliases for the - "en_US_POSIX" locale: - - >>> os.environ['LC_MESSAGES'] = 'POSIX' - >>> default_locale('LC_MESSAGES') - 'en_US_POSIX' - - The following fallbacks to the variable are always considered: - - - ``LANGUAGE`` - - ``LC_ALL`` - - ``LC_CTYPE`` - - ``LANG`` - - :param category: one of the ``LC_XXX`` environment variable names - :param aliases: a dictionary of aliases for locale identifiers - """ - varnames = (category, 'LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LANG') - for name in filter(None, varnames): - locale = os.getenv(name) - if locale: - if name == 'LANGUAGE' and ':' in locale: - # the LANGUAGE variable may contain a colon-separated list of - # language codes; we just pick the language on the list - locale = locale.split(':')[0] - if locale in ('C', 'POSIX'): - locale = 'en_US_POSIX' - elif aliases and locale in aliases: - locale = aliases[locale] - try: - return get_locale_identifier(parse_locale(locale)) - except ValueError: - pass - - -def negotiate_locale(preferred, available, sep='_', aliases=LOCALE_ALIASES): - """Find the best match between available and requested locale strings. - - >>> negotiate_locale(['de_DE', 'en_US'], ['de_DE', 'de_AT']) - 'de_DE' - >>> negotiate_locale(['de_DE', 'en_US'], ['en', 'de']) - 'de' - - Case is ignored by the algorithm, the result uses the case of the preferred - locale identifier: - - >>> negotiate_locale(['de_DE', 'en_US'], ['de_de', 'de_at']) - 'de_DE' - - >>> negotiate_locale(['de_DE', 'en_US'], ['de_de', 'de_at']) - 'de_DE' - - By default, some web browsers unfortunately do not include the territory - in the locale identifier for many locales, and some don't even allow the - user to easily add the territory. So while you may prefer using qualified - locale identifiers in your web-application, they would not normally match - the language-only locale sent by such browsers. To workaround that, this - function uses a default mapping of commonly used langauge-only locale - identifiers to identifiers including the territory: - - >>> negotiate_locale(['ja', 'en_US'], ['ja_JP', 'en_US']) - 'ja_JP' - - Some browsers even use an incorrect or outdated language code, such as "no" - for Norwegian, where the correct locale identifier would actually be "nb_NO" - (BokmÃ¥l) or "nn_NO" (Nynorsk). The aliases are intended to take care of - such cases, too: - - >>> negotiate_locale(['no', 'sv'], ['nb_NO', 'sv_SE']) - 'nb_NO' - - You can override this default mapping by passing a different `aliases` - dictionary to this function, or you can bypass the behavior althogher by - setting the `aliases` parameter to `None`. 
- - :param preferred: the list of locale strings preferred by the user - :param available: the list of locale strings available - :param sep: character that separates the different parts of the locale - strings - :param aliases: a dictionary of aliases for locale identifiers - """ - available = [a.lower() for a in available if a] - for locale in preferred: - ll = locale.lower() - if ll in available: - return locale - if aliases: - alias = aliases.get(ll) - if alias: - alias = alias.replace('_', sep) - if alias.lower() in available: - return alias - parts = locale.split(sep) - if len(parts) > 1 and parts[0].lower() in available: - return parts[0] - return None - - -def parse_locale(identifier, sep='_'): - """Parse a locale identifier into a tuple of the form ``(language, - territory, script, variant)``. - - >>> parse_locale('zh_CN') - ('zh', 'CN', None, None) - >>> parse_locale('zh_Hans_CN') - ('zh', 'CN', 'Hans', None) - - The default component separator is "_", but a different separator can be - specified using the `sep` parameter: - - >>> parse_locale('zh-CN', sep='-') - ('zh', 'CN', None, None) - - If the identifier cannot be parsed into a locale, a `ValueError` exception - is raised: - - >>> parse_locale('not_a_LOCALE_String') - Traceback (most recent call last): - ... - ValueError: 'not_a_LOCALE_String' is not a valid locale identifier - - Encoding information and locale modifiers are removed from the identifier: - - >>> parse_locale('it_IT@euro') - ('it', 'IT', None, None) - >>> parse_locale('en_US.UTF-8') - ('en', 'US', None, None) - >>> parse_locale('de_DE.iso885915@euro') - ('de', 'DE', None, None) - - See :rfc:`4646` for more information. - - :param identifier: the locale identifier string - :param sep: character that separates the different components of the locale - identifier - :raise `ValueError`: if the string does not appear to be a valid locale - identifier - """ - if '.' in identifier: - # this is probably the charset/encoding, which we don't care about - identifier = identifier.split('.', 1)[0] - if '@' in identifier: - # this is a locale modifier such as @euro, which we don't care about - # either - identifier = identifier.split('@', 1)[0] - - parts = identifier.split(sep) - lang = parts.pop(0).lower() - if not lang.isalpha(): - raise ValueError('expected only letters, got %r' % lang) - - script = territory = variant = None - if parts: - if len(parts[0]) == 4 and parts[0].isalpha(): - script = parts.pop(0).title() - - if parts: - if len(parts[0]) == 2 and parts[0].isalpha(): - territory = parts.pop(0).upper() - elif len(parts[0]) == 3 and parts[0].isdigit(): - territory = parts.pop(0) - - if parts: - if len(parts[0]) == 4 and parts[0][0].isdigit() or \ - len(parts[0]) >= 5 and parts[0][0].isalpha(): - variant = parts.pop() - - if parts: - raise ValueError('%r is not a valid locale identifier' % identifier) - - return lang, territory, script, variant - - -def get_locale_identifier(tup, sep='_'): - """The reverse of :func:`parse_locale`. It creates a locale identifier out - of a ``(language, territory, script, variant)`` tuple. Items can be set to - ``None`` and trailing ``None``\s can also be left out of the tuple. - - >>> get_locale_identifier(('de', 'DE', None, '1999')) - 'de_DE_1999' - - .. versionadded:: 1.0 - - :param tup: the tuple as returned by :func:`parse_locale`. - :param sep: the separator for the identifier. 
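Since the vendored copy is being dropped in favour of the `Babel>=1.3` entry in the new requirements.txt, `parse_locale` and its inverse `get_locale_identifier` remain importable from the installed package. A quick round-trip sketch (assuming Babel is installed via pip rather than from this vendor directory):

```python
# Round-trip between a locale string and its (language, territory, script,
# variant) tuple using the pip-installed Babel instead of the vendored copy.
from babel.core import parse_locale, get_locale_identifier

parts = parse_locale('de-DE', sep='-')          # ('de', 'DE', None, None)
assert get_locale_identifier(parts) == 'de_DE'  # reverse direction
```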
- """ - tup = tuple(tup[:4]) - lang, territory, script, variant = tup + (None,) * (4 - len(tup)) - return sep.join(filter(None, (lang, script, territory, variant))) diff --git a/vendor/babel/dates.py b/vendor/babel/dates.py deleted file mode 100644 index 72674e8a..00000000 --- a/vendor/babel/dates.py +++ /dev/null @@ -1,1181 +0,0 @@ -# -*- coding: utf-8 -*- -""" - babel.dates - ~~~~~~~~~~~ - - Locale dependent formatting and parsing of dates and times. - - The default locale for the functions in this module is determined by the - following environment variables, in that order: - - * ``LC_TIME``, - * ``LC_ALL``, and - * ``LANG`` - - :copyright: (c) 2013 by the Babel Team. - :license: BSD, see LICENSE for more details. -""" - -from __future__ import division - -import re -import pytz as _pytz - -from datetime import date, datetime, time, timedelta -from bisect import bisect_right - -from babel.core import default_locale, get_global, Locale -from babel.util import UTC, LOCALTZ -from babel._compat import string_types, integer_types, number_types - - -LC_TIME = default_locale('LC_TIME') - -# Aliases for use in scopes where the modules are shadowed by local variables -date_ = date -datetime_ = datetime -time_ = time - - -def get_timezone(zone=None): - """Looks up a timezone by name and returns it. The timezone object - returned comes from ``pytz`` and corresponds to the `tzinfo` interface and - can be used with all of the functions of Babel that operate with dates. - - If a timezone is not known a :exc:`LookupError` is raised. If `zone` - is ``None`` a local zone object is returned. - - :param zone: the name of the timezone to look up. If a timezone object - itself is passed in, mit's returned unchanged. - """ - if zone is None: - return LOCALTZ - if not isinstance(zone, string_types): - return zone - try: - return _pytz.timezone(zone) - except _pytz.UnknownTimeZoneError: - raise LookupError('Unknown timezone %s' % zone) - - -def get_next_timezone_transition(zone=None, dt=None): - """Given a timezone it will return a :class:`TimezoneTransition` object - that holds the information about the next timezone transition that's going - to happen. For instance this can be used to detect when the next DST - change is going to happen and how it looks like. - - The transition is calculated relative to the given datetime object. The - next transition that follows the date is used. If a transition cannot - be found the return value will be `None`. - - Transition information can only be provided for timezones returned by - the :func:`get_timezone` function. - - :param zone: the timezone for which the transition should be looked up. - If not provided the local timezone is used. - :param dt: the date after which the next transition should be found. - If not given the current time is assumed. - """ - zone = get_timezone(zone) - if dt is None: - dt = datetime.utcnow() - else: - dt = dt.replace(tzinfo=None) - - if not hasattr(zone, '_utc_transition_times'): - raise TypeError('Given timezone does not have UTC transition ' - 'times. 
This can happen because the operating ' - 'system fallback local timezone is used or a ' - 'custom timezone object') - - try: - idx = max(0, bisect_right(zone._utc_transition_times, dt)) - old_trans = zone._transition_info[idx - 1] - new_trans = zone._transition_info[idx] - old_tz = zone._tzinfos[old_trans] - new_tz = zone._tzinfos[new_trans] - except (LookupError, ValueError): - return None - - return TimezoneTransition( - activates=zone._utc_transition_times[idx], - from_tzinfo=old_tz, - to_tzinfo=new_tz, - reference_date=dt - ) - - -class TimezoneTransition(object): - """A helper object that represents the return value from - :func:`get_next_timezone_transition`. - """ - - def __init__(self, activates, from_tzinfo, to_tzinfo, reference_date=None): - #: the time of the activation of the timezone transition in UTC. - self.activates = activates - #: the timezone from where the transition starts. - self.from_tzinfo = from_tzinfo - #: the timezone for after the transition. - self.to_tzinfo = to_tzinfo - #: the reference date that was provided. This is the `dt` parameter - #: to the :func:`get_next_timezone_transition`. - self.reference_date = reference_date - - @property - def from_tz(self): - """The name of the timezone before the transition.""" - return self.from_tzinfo._tzname - - @property - def to_tz(self): - """The name of the timezone after the transition.""" - return self.to_tzinfo._tzname - - @property - def from_offset(self): - """The UTC offset in seconds before the transition.""" - return int(self.from_tzinfo._utcoffset.total_seconds()) - - @property - def to_offset(self): - """The UTC offset in seconds after the transition.""" - return int(self.to_tzinfo._utcoffset.total_seconds()) - - def __repr__(self): - return ' %s (%s)>' % ( - self.from_tz, - self.to_tz, - self.activates, - ) - - -def get_period_names(locale=LC_TIME): - """Return the names for day periods (AM/PM) used by the locale. - - >>> get_period_names(locale='en_US')['am'] - u'AM' - - :param locale: the `Locale` object, or a locale string - """ - return Locale.parse(locale).periods - - -def get_day_names(width='wide', context='format', locale=LC_TIME): - """Return the day names used by the locale for the specified format. - - >>> get_day_names('wide', locale='en_US')[1] - u'Tuesday' - >>> get_day_names('abbreviated', locale='es')[1] - u'mar' - >>> get_day_names('narrow', context='stand-alone', locale='de_DE')[1] - u'D' - - :param width: the width to use, one of "wide", "abbreviated", or "narrow" - :param context: the context, either "format" or "stand-alone" - :param locale: the `Locale` object, or a locale string - """ - return Locale.parse(locale).days[context][width] - - -def get_month_names(width='wide', context='format', locale=LC_TIME): - """Return the month names used by the locale for the specified format. - - >>> get_month_names('wide', locale='en_US')[1] - u'January' - >>> get_month_names('abbreviated', locale='es')[1] - u'ene' - >>> get_month_names('narrow', context='stand-alone', locale='de_DE')[1] - u'J' - - :param width: the width to use, one of "wide", "abbreviated", or "narrow" - :param context: the context, either "format" or "stand-alone" - :param locale: the `Locale` object, or a locale string - """ - return Locale.parse(locale).months[context][width] - - -def get_quarter_names(width='wide', context='format', locale=LC_TIME): - """Return the quarter names used by the locale for the specified format. 
- - >>> get_quarter_names('wide', locale='en_US')[1] - u'1st quarter' - >>> get_quarter_names('abbreviated', locale='de_DE')[1] - u'Q1' - - :param width: the width to use, one of "wide", "abbreviated", or "narrow" - :param context: the context, either "format" or "stand-alone" - :param locale: the `Locale` object, or a locale string - """ - return Locale.parse(locale).quarters[context][width] - - -def get_era_names(width='wide', locale=LC_TIME): - """Return the era names used by the locale for the specified format. - - >>> get_era_names('wide', locale='en_US')[1] - u'Anno Domini' - >>> get_era_names('abbreviated', locale='de_DE')[1] - u'n. Chr.' - - :param width: the width to use, either "wide", "abbreviated", or "narrow" - :param locale: the `Locale` object, or a locale string - """ - return Locale.parse(locale).eras[width] - - -def get_date_format(format='medium', locale=LC_TIME): - """Return the date formatting patterns used by the locale for the specified - format. - - >>> get_date_format(locale='en_US') - - >>> get_date_format('full', locale='de_DE') - - - :param format: the format to use, one of "full", "long", "medium", or - "short" - :param locale: the `Locale` object, or a locale string - """ - return Locale.parse(locale).date_formats[format] - - -def get_datetime_format(format='medium', locale=LC_TIME): - """Return the datetime formatting patterns used by the locale for the - specified format. - - >>> get_datetime_format(locale='en_US') - u'{1}, {0}' - - :param format: the format to use, one of "full", "long", "medium", or - "short" - :param locale: the `Locale` object, or a locale string - """ - patterns = Locale.parse(locale).datetime_formats - if format not in patterns: - format = None - return patterns[format] - - -def get_time_format(format='medium', locale=LC_TIME): - """Return the time formatting patterns used by the locale for the specified - format. - - >>> get_time_format(locale='en_US') - - >>> get_time_format('full', locale='de_DE') - - - :param format: the format to use, one of "full", "long", "medium", or - "short" - :param locale: the `Locale` object, or a locale string - """ - return Locale.parse(locale).time_formats[format] - - -def get_timezone_gmt(datetime=None, width='long', locale=LC_TIME): - """Return the timezone associated with the given `datetime` object formatted - as string indicating the offset from GMT. - - >>> dt = datetime(2007, 4, 1, 15, 30) - >>> get_timezone_gmt(dt, locale='en') - u'GMT+00:00' - - >>> tz = get_timezone('America/Los_Angeles') - >>> dt = datetime(2007, 4, 1, 15, 30, tzinfo=tz) - >>> get_timezone_gmt(dt, locale='en') - u'GMT-08:00' - >>> get_timezone_gmt(dt, 'short', locale='en') - u'-0800' - - The long format depends on the locale, for example in France the acronym - UTC string is used instead of GMT: - - >>> get_timezone_gmt(dt, 'long', locale='fr_FR') - u'UTC-08:00' - - .. 
versionadded:: 0.9 - - :param datetime: the ``datetime`` object; if `None`, the current date and - time in UTC is used - :param width: either "long" or "short" - :param locale: the `Locale` object, or a locale string - """ - if datetime is None: - datetime = datetime_.utcnow() - elif isinstance(datetime, integer_types): - datetime = datetime_.utcfromtimestamp(datetime).time() - if datetime.tzinfo is None: - datetime = datetime.replace(tzinfo=UTC) - locale = Locale.parse(locale) - - offset = datetime.tzinfo.utcoffset(datetime) - seconds = offset.days * 24 * 60 * 60 + offset.seconds - hours, seconds = divmod(seconds, 3600) - if width == 'short': - pattern = u'%+03d%02d' - else: - pattern = locale.zone_formats['gmt'] % '%+03d:%02d' - return pattern % (hours, seconds // 60) - - -def get_timezone_location(dt_or_tzinfo=None, locale=LC_TIME): - """Return a representation of the given timezone using "location format". - - The result depends on both the local display name of the country and the - city associated with the time zone: - - >>> tz = get_timezone('America/St_Johns') - >>> get_timezone_location(tz, locale='de_DE') - u"Kanada (St. John's) Zeit" - >>> tz = get_timezone('America/Mexico_City') - >>> get_timezone_location(tz, locale='de_DE') - u'Mexiko (Mexiko-Stadt) Zeit' - - If the timezone is associated with a country that uses only a single - timezone, just the localized country name is returned: - - >>> tz = get_timezone('Europe/Berlin') - >>> get_timezone_name(tz, locale='de_DE') - u'Mitteleurop\\xe4ische Zeit' - - .. versionadded:: 0.9 - - :param dt_or_tzinfo: the ``datetime`` or ``tzinfo`` object that determines - the timezone; if `None`, the current date and time in - UTC is assumed - :param locale: the `Locale` object, or a locale string - :return: the localized timezone name using location format - """ - if dt_or_tzinfo is None: - dt = datetime.now() - tzinfo = LOCALTZ - elif isinstance(dt_or_tzinfo, string_types): - dt = None - tzinfo = get_timezone(dt_or_tzinfo) - elif isinstance(dt_or_tzinfo, integer_types): - dt = None - tzinfo = UTC - elif isinstance(dt_or_tzinfo, (datetime, time)): - dt = dt_or_tzinfo - if dt.tzinfo is not None: - tzinfo = dt.tzinfo - else: - tzinfo = UTC - else: - dt = None - tzinfo = dt_or_tzinfo - locale = Locale.parse(locale) - - if hasattr(tzinfo, 'zone'): - zone = tzinfo.zone - else: - zone = tzinfo.tzname(dt or datetime.utcnow()) - - # Get the canonical time-zone code - zone = get_global('zone_aliases').get(zone, zone) - - info = locale.time_zones.get(zone, {}) - - # Otherwise, if there is only one timezone for the country, return the - # localized country name - region_format = locale.zone_formats['region'] - territory = get_global('zone_territories').get(zone) - if territory not in locale.territories: - territory = 'ZZ' # invalid/unknown - territory_name = locale.territories[territory] - if territory and len(get_global('territory_zones').get(territory, [])) == 1: - return region_format % (territory_name) - - # Otherwise, include the city in the output - fallback_format = locale.zone_formats['fallback'] - if 'city' in info: - city_name = info['city'] - else: - metazone = get_global('meta_zones').get(zone) - metazone_info = locale.meta_zones.get(metazone, {}) - if 'city' in metazone_info: - city_name = metazone_info['city'] - elif '/' in zone: - city_name = zone.split('/', 1)[1].replace('_', ' ') - else: - city_name = zone.replace('_', ' ') - - return region_format % (fallback_format % { - '0': city_name, - '1': territory_name - }) - - -def 
get_timezone_name(dt_or_tzinfo=None, width='long', uncommon=False, - locale=LC_TIME, zone_variant=None): - r"""Return the localized display name for the given timezone. The timezone - may be specified using a ``datetime`` or `tzinfo` object. - - >>> dt = time(15, 30, tzinfo=get_timezone('America/Los_Angeles')) - >>> get_timezone_name(dt, locale='en_US') - u'Pacific Standard Time' - >>> get_timezone_name(dt, width='short', locale='en_US') - u'PST' - - If this function gets passed only a `tzinfo` object and no concrete - `datetime`, the returned display name is indenpendent of daylight savings - time. This can be used for example for selecting timezones, or to set the - time of events that recur across DST changes: - - >>> tz = get_timezone('America/Los_Angeles') - >>> get_timezone_name(tz, locale='en_US') - u'Pacific Time' - >>> get_timezone_name(tz, 'short', locale='en_US') - u'PT' - - If no localized display name for the timezone is available, and the timezone - is associated with a country that uses only a single timezone, the name of - that country is returned, formatted according to the locale: - - >>> tz = get_timezone('Europe/Berlin') - >>> get_timezone_name(tz, locale='de_DE') - u'Mitteleurop\xe4ische Zeit' - >>> get_timezone_name(tz, locale='pt_BR') - u'Hor\xe1rio da Europa Central' - - On the other hand, if the country uses multiple timezones, the city is also - included in the representation: - - >>> tz = get_timezone('America/St_Johns') - >>> get_timezone_name(tz, locale='de_DE') - u'Neufundland-Zeit' - - Note that short format is currently not supported for all timezones and - all locales. This is partially because not every timezone has a short - code in every locale. In that case it currently falls back to the long - format. - - For more information see `LDML Appendix J: Time Zone Display Names - `_ - - .. versionadded:: 0.9 - - .. versionchanged:: 1.0 - Added `zone_variant` support. - - :param dt_or_tzinfo: the ``datetime`` or ``tzinfo`` object that determines - the timezone; if a ``tzinfo`` object is used, the - resulting display name will be generic, i.e. - independent of daylight savings time; if `None`, the - current date in UTC is assumed - :param width: either "long" or "short" - :param uncommon: deprecated and ignored - :param zone_variant: defines the zone variation to return. By default the - variation is defined from the datetime object - passed in. If no datetime object is passed in, the - ``'generic'`` variation is assumed. The following - values are valid: ``'generic'``, ``'daylight'`` and - ``'standard'``. 
- :param locale: the `Locale` object, or a locale string - """ - if dt_or_tzinfo is None: - dt = datetime.now() - tzinfo = LOCALTZ - elif isinstance(dt_or_tzinfo, string_types): - dt = None - tzinfo = get_timezone(dt_or_tzinfo) - elif isinstance(dt_or_tzinfo, integer_types): - dt = None - tzinfo = UTC - elif isinstance(dt_or_tzinfo, (datetime, time)): - dt = dt_or_tzinfo - if dt.tzinfo is not None: - tzinfo = dt.tzinfo - else: - tzinfo = UTC - else: - dt = None - tzinfo = dt_or_tzinfo - locale = Locale.parse(locale) - - if hasattr(tzinfo, 'zone'): - zone = tzinfo.zone - else: - zone = tzinfo.tzname(dt) - - if zone_variant is None: - if dt is None: - zone_variant = 'generic' - else: - dst = tzinfo.dst(dt) - if dst: - zone_variant = 'daylight' - else: - zone_variant = 'standard' - else: - if zone_variant not in ('generic', 'standard', 'daylight'): - raise ValueError('Invalid zone variation') - - # Get the canonical time-zone code - zone = get_global('zone_aliases').get(zone, zone) - - info = locale.time_zones.get(zone, {}) - # Try explicitly translated zone names first - if width in info: - if zone_variant in info[width]: - return info[width][zone_variant] - - metazone = get_global('meta_zones').get(zone) - if metazone: - metazone_info = locale.meta_zones.get(metazone, {}) - if width in metazone_info: - if zone_variant in metazone_info[width]: - return metazone_info[width][zone_variant] - - # If we have a concrete datetime, we assume that the result can't be - # independent of daylight savings time, so we return the GMT offset - if dt is not None: - return get_timezone_gmt(dt, width=width, locale=locale) - - return get_timezone_location(dt_or_tzinfo, locale=locale) - - -def format_date(date=None, format='medium', locale=LC_TIME): - """Return a date formatted according to the given pattern. - - >>> d = date(2007, 04, 01) - >>> format_date(d, locale='en_US') - u'Apr 1, 2007' - >>> format_date(d, format='full', locale='de_DE') - u'Sonntag, 1. April 2007' - - If you don't want to use the locale default formats, you can specify a - custom date pattern: - - >>> format_date(d, "EEE, MMM d, ''yy", locale='en') - u"Sun, Apr 1, '07" - - :param date: the ``date`` or ``datetime`` object; if `None`, the current - date is used - :param format: one of "full", "long", "medium", or "short", or a custom - date/time pattern - :param locale: a `Locale` object or a locale identifier - """ - if date is None: - date = date_.today() - elif isinstance(date, datetime): - date = date.date() - - locale = Locale.parse(locale) - if format in ('full', 'long', 'medium', 'short'): - format = get_date_format(format, locale=locale) - pattern = parse_pattern(format) - return pattern.apply(date, locale) - - -def format_datetime(datetime=None, format='medium', tzinfo=None, - locale=LC_TIME): - r"""Return a date formatted according to the given pattern. - - >>> dt = datetime(2007, 04, 01, 15, 30) - >>> format_datetime(dt, locale='en_US') - u'Apr 1, 2007, 3:30:00 PM' - - For any pattern requiring the display of the time-zone, the third-party - ``pytz`` package is needed to explicitly specify the time-zone: - - >>> format_datetime(dt, 'full', tzinfo=get_timezone('Europe/Paris'), - ... locale='fr_FR') - u'dimanche 1 avril 2007 17:30:00 heure avanc\xe9e d\u2019Europe centrale' - >>> format_datetime(dt, "yyyy.MM.dd G 'at' HH:mm:ss zzz", - ... 
tzinfo=get_timezone('US/Eastern'), locale='en') - u'2007.04.01 AD at 11:30:00 EDT' - - :param datetime: the `datetime` object; if `None`, the current date and - time is used - :param format: one of "full", "long", "medium", or "short", or a custom - date/time pattern - :param tzinfo: the timezone to apply to the time for display - :param locale: a `Locale` object or a locale identifier - """ - if datetime is None: - datetime = datetime_.utcnow() - elif isinstance(datetime, number_types): - datetime = datetime_.utcfromtimestamp(datetime) - elif isinstance(datetime, time): - datetime = datetime_.combine(date.today(), datetime) - if datetime.tzinfo is None: - datetime = datetime.replace(tzinfo=UTC) - if tzinfo is not None: - datetime = datetime.astimezone(get_timezone(tzinfo)) - if hasattr(tzinfo, 'normalize'): # pytz - datetime = tzinfo.normalize(datetime) - - locale = Locale.parse(locale) - if format in ('full', 'long', 'medium', 'short'): - return get_datetime_format(format, locale=locale) \ - .replace("'", "") \ - .replace('{0}', format_time(datetime, format, tzinfo=None, - locale=locale)) \ - .replace('{1}', format_date(datetime, format, locale=locale)) - else: - return parse_pattern(format).apply(datetime, locale) - - -def format_time(time=None, format='medium', tzinfo=None, locale=LC_TIME): - r"""Return a time formatted according to the given pattern. - - >>> t = time(15, 30) - >>> format_time(t, locale='en_US') - u'3:30:00 PM' - >>> format_time(t, format='short', locale='de_DE') - u'15:30' - - If you don't want to use the locale default formats, you can specify a - custom time pattern: - - >>> format_time(t, "hh 'o''clock' a", locale='en') - u"03 o'clock PM" - - For any pattern requiring the display of the time-zone a - timezone has to be specified explicitly: - - >>> t = datetime(2007, 4, 1, 15, 30) - >>> tzinfo = get_timezone('Europe/Paris') - >>> t = tzinfo.localize(t) - >>> format_time(t, format='full', tzinfo=tzinfo, locale='fr_FR') - u'15:30:00 heure avanc\xe9e d\u2019Europe centrale' - >>> format_time(t, "hh 'o''clock' a, zzzz", tzinfo=get_timezone('US/Eastern'), - ... locale='en') - u"09 o'clock AM, Eastern Daylight Time" - - As that example shows, when this function gets passed a - ``datetime.datetime`` value, the actual time in the formatted string is - adjusted to the timezone specified by the `tzinfo` parameter. If the - ``datetime`` is "naive" (i.e. it has no associated timezone information), - it is assumed to be in UTC. - - These timezone calculations are **not** performed if the value is of type - ``datetime.time``, as without date information there's no way to determine - what a given time would translate to in a different timezone without - information about whether daylight savings time is in effect or not. This - means that time values are left as-is, and the value of the `tzinfo` - parameter is only used to display the timezone name if needed: - - >>> t = time(15, 30) - >>> format_time(t, format='full', tzinfo=get_timezone('Europe/Paris'), - ... locale='fr_FR') - u'15:30:00 heure normale de l\u2019Europe centrale' - >>> format_time(t, format='full', tzinfo=get_timezone('US/Eastern'), - ... 
locale='en_US') - u'3:30:00 PM Eastern Standard Time' - - :param time: the ``time`` or ``datetime`` object; if `None`, the current - time in UTC is used - :param format: one of "full", "long", "medium", or "short", or a custom - date/time pattern - :param tzinfo: the time-zone to apply to the time for display - :param locale: a `Locale` object or a locale identifier - """ - if time is None: - time = datetime.utcnow() - elif isinstance(time, number_types): - time = datetime.utcfromtimestamp(time) - if time.tzinfo is None: - time = time.replace(tzinfo=UTC) - if isinstance(time, datetime): - if tzinfo is not None: - time = time.astimezone(tzinfo) - if hasattr(tzinfo, 'normalize'): # pytz - time = tzinfo.normalize(time) - time = time.timetz() - elif tzinfo is not None: - time = time.replace(tzinfo=tzinfo) - - locale = Locale.parse(locale) - if format in ('full', 'long', 'medium', 'short'): - format = get_time_format(format, locale=locale) - return parse_pattern(format).apply(time, locale) - - -TIMEDELTA_UNITS = ( - ('year', 3600 * 24 * 365), - ('month', 3600 * 24 * 30), - ('week', 3600 * 24 * 7), - ('day', 3600 * 24), - ('hour', 3600), - ('minute', 60), - ('second', 1) -) - - -def format_timedelta(delta, granularity='second', threshold=.85, - add_direction=False, format='medium', - locale=LC_TIME): - """Return a time delta according to the rules of the given locale. - - >>> format_timedelta(timedelta(weeks=12), locale='en_US') - u'3 months' - >>> format_timedelta(timedelta(seconds=1), locale='es') - u'1 segundo' - - The granularity parameter can be provided to alter the lowest unit - presented, which defaults to a second. - - >>> format_timedelta(timedelta(hours=3), granularity='day', - ... locale='en_US') - u'1 day' - - The threshold parameter can be used to determine at which value the - presentation switches to the next higher unit. A higher threshold factor - means the presentation will switch later. For example: - - >>> format_timedelta(timedelta(hours=23), threshold=0.9, locale='en_US') - u'1 day' - >>> format_timedelta(timedelta(hours=23), threshold=1.1, locale='en_US') - u'23 hours' - - In addition directional information can be provided that informs - the user if the date is in the past or in the future: - - >>> format_timedelta(timedelta(hours=1), add_direction=True) - u'In 1 hour' - >>> format_timedelta(timedelta(hours=-1), add_direction=True) - u'1 hour ago' - - :param delta: a ``timedelta`` object representing the time difference to - format, or the delta in seconds as an `int` value - :param granularity: determines the smallest unit that should be displayed, - the value can be one of "year", "month", "week", "day", - "hour", "minute" or "second" - :param threshold: factor that determines at which point the presentation - switches to the next higher unit - :param add_direction: if this flag is set to `True` the return value will - include directional information. For instance a - positive timedelta will include the information about - it being in the future, a negative will be information - about the value being in the past. 
- :param format: the format (currently only "medium" and "short" are supported) - :param locale: a `Locale` object or a locale identifier - """ - if format not in ('short', 'medium'): - raise TypeError('Format can only be one of "short" or "medium"') - if isinstance(delta, timedelta): - seconds = int((delta.days * 86400) + delta.seconds) - else: - seconds = delta - locale = Locale.parse(locale) - - def _iter_choices(unit): - if add_direction: - if seconds >= 0: - yield unit + '-future' - else: - yield unit + '-past' - yield unit + ':' + format - yield unit - - for unit, secs_per_unit in TIMEDELTA_UNITS: - value = abs(seconds) / secs_per_unit - if value >= threshold or unit == granularity: - if unit == granularity and value > 0: - value = max(1, value) - value = int(round(value)) - plural_form = locale.plural_form(value) - pattern = None - for choice in _iter_choices(unit): - patterns = locale._data['unit_patterns'].get(choice) - if patterns is not None: - pattern = patterns[plural_form] - break - # This really should not happen - if pattern is None: - return u'' - return pattern.replace('{0}', str(value)) - - return u'' - - -def parse_date(string, locale=LC_TIME): - """Parse a date from a string. - - This function uses the date format for the locale as a hint to determine - the order in which the date fields appear in the string. - - >>> parse_date('4/1/04', locale='en_US') - datetime.date(2004, 4, 1) - >>> parse_date('01.04.2004', locale='de_DE') - datetime.date(2004, 4, 1) - - :param string: the string containing the date - :param locale: a `Locale` object or a locale identifier - """ - # TODO: try ISO format first? - format = get_date_format(locale=locale).pattern.lower() - year_idx = format.index('y') - month_idx = format.index('m') - if month_idx < 0: - month_idx = format.index('l') - day_idx = format.index('d') - - indexes = [(year_idx, 'Y'), (month_idx, 'M'), (day_idx, 'D')] - indexes.sort() - indexes = dict([(item[1], idx) for idx, item in enumerate(indexes)]) - - # FIXME: this currently only supports numbers, but should also support month - # names, both in the requested locale, and english - - numbers = re.findall('(\d+)', string) - year = numbers[indexes['Y']] - if len(year) == 2: - year = 2000 + int(year) - else: - year = int(year) - month = int(numbers[indexes['M']]) - day = int(numbers[indexes['D']]) - if month > 12: - month, day = day, month - return date(year, month, day) - - -def parse_time(string, locale=LC_TIME): - """Parse a time from a string. - - This function uses the time format for the locale as a hint to determine - the order in which the time fields appear in the string. - - >>> parse_time('15:30:00', locale='en_US') - datetime.time(15, 30) - - :param string: the string containing the time - :param locale: a `Locale` object or a locale identifier - :return: the parsed time - :rtype: `time` - """ - # TODO: try ISO format first? 
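[Editor's note, not part of the patch: the deleted docstrings above document Babel's public date/time helpers (format_date, format_datetime, format_time, format_timedelta, get_timezone_gmt, get_timezone_name). A minimal usage sketch of those same helpers, exercised against a separately installed Babel package rather than this deleted vendored copy, is shown below; the printed values are indicative only, since they depend on the CLDR data shipped with whichever Babel version is installed.]

    # Sketch only: same helpers as documented in the deleted module above,
    # imported from an installed Babel package instead of the vendor tree.
    from datetime import datetime, timedelta
    from babel.dates import (format_date, format_datetime, format_timedelta,
                             get_timezone, get_timezone_name)

    dt = datetime(2007, 4, 1, 15, 30)
    print(format_date(dt, format='full', locale='de_DE'))         # e.g. Sonntag, 1. April 2007
    print(format_datetime(dt, locale='en_US'))                     # e.g. Apr 1, 2007, 3:30:00 PM
    print(format_timedelta(timedelta(weeks=12), locale='en_US'))   # e.g. 3 months

    tz = get_timezone('America/Los_Angeles')                       # tzinfo lookup by zone name
    print(get_timezone_name(tz, locale='en_US'))                   # e.g. Pacific Time (generic, no DST)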
- format = get_time_format(locale=locale).pattern.lower() - hour_idx = format.index('h') - if hour_idx < 0: - hour_idx = format.index('k') - min_idx = format.index('m') - sec_idx = format.index('s') - - indexes = [(hour_idx, 'H'), (min_idx, 'M'), (sec_idx, 'S')] - indexes.sort() - indexes = dict([(item[1], idx) for idx, item in enumerate(indexes)]) - - # FIXME: support 12 hour clock, and 0-based hour specification - # and seconds should be optional, maybe minutes too - # oh, and time-zones, of course - - numbers = re.findall('(\d+)', string) - hour = int(numbers[indexes['H']]) - minute = int(numbers[indexes['M']]) - second = int(numbers[indexes['S']]) - return time(hour, minute, second) - - -class DateTimePattern(object): - - def __init__(self, pattern, format): - self.pattern = pattern - self.format = format - - def __repr__(self): - return '<%s %r>' % (type(self).__name__, self.pattern) - - def __unicode__(self): - return self.pattern - - def __mod__(self, other): - if type(other) is not DateTimeFormat: - return NotImplemented - return self.format % other - - def apply(self, datetime, locale): - return self % DateTimeFormat(datetime, locale) - - -class DateTimeFormat(object): - - def __init__(self, value, locale): - assert isinstance(value, (date, datetime, time)) - if isinstance(value, (datetime, time)) and value.tzinfo is None: - value = value.replace(tzinfo=UTC) - self.value = value - self.locale = Locale.parse(locale) - - def __getitem__(self, name): - char = name[0] - num = len(name) - if char == 'G': - return self.format_era(char, num) - elif char in ('y', 'Y', 'u'): - return self.format_year(char, num) - elif char in ('Q', 'q'): - return self.format_quarter(char, num) - elif char in ('M', 'L'): - return self.format_month(char, num) - elif char in ('w', 'W'): - return self.format_week(char, num) - elif char == 'd': - return self.format(self.value.day, num) - elif char == 'D': - return self.format_day_of_year(num) - elif char == 'F': - return self.format_day_of_week_in_month() - elif char in ('E', 'e', 'c'): - return self.format_weekday(char, num) - elif char == 'a': - return self.format_period(char) - elif char == 'h': - if self.value.hour % 12 == 0: - return self.format(12, num) - else: - return self.format(self.value.hour % 12, num) - elif char == 'H': - return self.format(self.value.hour, num) - elif char == 'K': - return self.format(self.value.hour % 12, num) - elif char == 'k': - if self.value.hour == 0: - return self.format(24, num) - else: - return self.format(self.value.hour, num) - elif char == 'm': - return self.format(self.value.minute, num) - elif char == 's': - return self.format(self.value.second, num) - elif char == 'S': - return self.format_frac_seconds(num) - elif char == 'A': - return self.format_milliseconds_in_day(num) - elif char in ('z', 'Z', 'v', 'V'): - return self.format_timezone(char, num) - else: - raise KeyError('Unsupported date/time field %r' % char) - - def format_era(self, char, num): - width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[max(3, num)] - era = int(self.value.year >= 0) - return get_era_names(width, self.locale)[era] - - def format_year(self, char, num): - value = self.value.year - if char.isupper(): - week = self.get_week_number(self.get_day_of_year()) - if week == 0: - value -= 1 - year = self.format(value, num) - if num == 2: - year = year[-2:] - return year - - def format_quarter(self, char, num): - quarter = (self.value.month - 1) // 3 + 1 - if num <= 2: - return ('%%0%dd' % num) % quarter - width = {3: 'abbreviated', 4: 'wide', 5: 
'narrow'}[num] - context = {'Q': 'format', 'q': 'stand-alone'}[char] - return get_quarter_names(width, context, self.locale)[quarter] - - def format_month(self, char, num): - if num <= 2: - return ('%%0%dd' % num) % self.value.month - width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[num] - context = {'M': 'format', 'L': 'stand-alone'}[char] - return get_month_names(width, context, self.locale)[self.value.month] - - def format_week(self, char, num): - if char.islower(): # week of year - day_of_year = self.get_day_of_year() - week = self.get_week_number(day_of_year) - if week == 0: - date = self.value - timedelta(days=day_of_year) - week = self.get_week_number(self.get_day_of_year(date), - date.weekday()) - return self.format(week, num) - else: # week of month - week = self.get_week_number(self.value.day) - if week == 0: - date = self.value - timedelta(days=self.value.day) - week = self.get_week_number(date.day, date.weekday()) - pass - return '%d' % week - - def format_weekday(self, char, num): - if num < 3: - if char.islower(): - value = 7 - self.locale.first_week_day + self.value.weekday() - return self.format(value % 7 + 1, num) - num = 3 - weekday = self.value.weekday() - width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[num] - context = {3: 'format', 4: 'format', 5: 'stand-alone'}[num] - return get_day_names(width, context, self.locale)[weekday] - - def format_day_of_year(self, num): - return self.format(self.get_day_of_year(), num) - - def format_day_of_week_in_month(self): - return '%d' % ((self.value.day - 1) // 7 + 1) - - def format_period(self, char): - period = {0: 'am', 1: 'pm'}[int(self.value.hour >= 12)] - return get_period_names(locale=self.locale)[period] - - def format_frac_seconds(self, num): - value = str(self.value.microsecond) - return self.format(round(float('.%s' % value), num) * 10**num, num) - - def format_milliseconds_in_day(self, num): - msecs = self.value.microsecond // 1000 + self.value.second * 1000 + \ - self.value.minute * 60000 + self.value.hour * 3600000 - return self.format(msecs, num) - - def format_timezone(self, char, num): - width = {3: 'short', 4: 'long'}[max(3, num)] - if char == 'z': - return get_timezone_name(self.value, width, locale=self.locale) - elif char == 'Z': - return get_timezone_gmt(self.value, width, locale=self.locale) - elif char == 'v': - return get_timezone_name(self.value.tzinfo, width, - locale=self.locale) - elif char == 'V': - if num == 1: - return get_timezone_name(self.value.tzinfo, width, - uncommon=True, locale=self.locale) - return get_timezone_location(self.value.tzinfo, locale=self.locale) - - def format(self, value, length): - return ('%%0%dd' % length) % value - - def get_day_of_year(self, date=None): - if date is None: - date = self.value - return (date - date.replace(month=1, day=1)).days + 1 - - def get_week_number(self, day_of_period, day_of_week=None): - """Return the number of the week of a day within a period. This may be - the week number in a year or the week number in a month. - - Usually this will return a value equal to or greater than 1, but if the - first week of the period is so short that it actually counts as the last - week of the previous period, this function will return 0. 
- - >>> format = DateTimeFormat(date(2006, 1, 8), Locale.parse('de_DE')) - >>> format.get_week_number(6) - 1 - - >>> format = DateTimeFormat(date(2006, 1, 8), Locale.parse('en_US')) - >>> format.get_week_number(6) - 2 - - :param day_of_period: the number of the day in the period (usually - either the day of month or the day of year) - :param day_of_week: the week day; if ommitted, the week day of the - current date is assumed - """ - if day_of_week is None: - day_of_week = self.value.weekday() - first_day = (day_of_week - self.locale.first_week_day - - day_of_period + 1) % 7 - if first_day < 0: - first_day += 7 - week_number = (day_of_period + first_day - 1) // 7 - if 7 - first_day >= self.locale.min_week_days: - week_number += 1 - return week_number - - -PATTERN_CHARS = { - 'G': [1, 2, 3, 4, 5], # era - 'y': None, 'Y': None, 'u': None, # year - 'Q': [1, 2, 3, 4], 'q': [1, 2, 3, 4], # quarter - 'M': [1, 2, 3, 4, 5], 'L': [1, 2, 3, 4, 5], # month - 'w': [1, 2], 'W': [1], # week - 'd': [1, 2], 'D': [1, 2, 3], 'F': [1], 'g': None, # day - 'E': [1, 2, 3, 4, 5], 'e': [1, 2, 3, 4, 5], 'c': [1, 3, 4, 5], # week day - 'a': [1], # period - 'h': [1, 2], 'H': [1, 2], 'K': [1, 2], 'k': [1, 2], # hour - 'm': [1, 2], # minute - 's': [1, 2], 'S': None, 'A': None, # second - 'z': [1, 2, 3, 4], 'Z': [1, 2, 3, 4], 'v': [1, 4], 'V': [1, 4] # zone -} - - -def parse_pattern(pattern): - """Parse date, time, and datetime format patterns. - - >>> parse_pattern("MMMMd").format - u'%(MMMM)s%(d)s' - >>> parse_pattern("MMM d, yyyy").format - u'%(MMM)s %(d)s, %(yyyy)s' - - Pattern can contain literal strings in single quotes: - - >>> parse_pattern("H:mm' Uhr 'z").format - u'%(H)s:%(mm)s Uhr %(z)s' - - An actual single quote can be used by using two adjacent single quote - characters: - - >>> parse_pattern("hh' o''clock'").format - u"%(hh)s o'clock" - - :param pattern: the formatting pattern to parse - """ - if type(pattern) is DateTimePattern: - return pattern - - result = [] - quotebuf = None - charbuf = [] - fieldchar = [''] - fieldnum = [0] - - def append_chars(): - result.append(''.join(charbuf).replace('%', '%%')) - del charbuf[:] - - def append_field(): - limit = PATTERN_CHARS[fieldchar[0]] - if limit and fieldnum[0] not in limit: - raise ValueError('Invalid length for field: %r' - % (fieldchar[0] * fieldnum[0])) - result.append('%%(%s)s' % (fieldchar[0] * fieldnum[0])) - fieldchar[0] = '' - fieldnum[0] = 0 - - for idx, char in enumerate(pattern.replace("''", '\0')): - if quotebuf is None: - if char == "'": # quote started - if fieldchar[0]: - append_field() - elif charbuf: - append_chars() - quotebuf = [] - elif char in PATTERN_CHARS: - if charbuf: - append_chars() - if char == fieldchar[0]: - fieldnum[0] += 1 - else: - if fieldchar[0]: - append_field() - fieldchar[0] = char - fieldnum[0] = 1 - else: - if fieldchar[0]: - append_field() - charbuf.append(char) - - elif quotebuf is not None: - if char == "'": # end of quote - charbuf.extend(quotebuf) - quotebuf = None - else: # inside quote - quotebuf.append(char) - - if fieldchar[0]: - append_field() - elif charbuf: - append_chars() - - return DateTimePattern(pattern, u''.join(result).replace('\0', "'")) diff --git a/vendor/babel/global.dat b/vendor/babel/global.dat deleted file mode 100644 index 82cbbae1..00000000 Binary files a/vendor/babel/global.dat and /dev/null differ diff --git a/vendor/babel/localedata.py b/vendor/babel/localedata.py deleted file mode 100644 index 88883ac8..00000000 --- a/vendor/babel/localedata.py +++ /dev/null @@ -1,209 +0,0 @@ -# -*- 
coding: utf-8 -*- -""" - babel.localedata - ~~~~~~~~~~~~~~~~ - - Low-level locale data access. - - :note: The `Locale` class, which uses this module under the hood, provides a - more convenient interface for accessing the locale data. - - :copyright: (c) 2013 by the Babel Team. - :license: BSD, see LICENSE for more details. -""" - -import os -import threading -from collections import MutableMapping - -from babel._compat import pickle - - -_cache = {} -_cache_lock = threading.RLock() -_dirname = os.path.join(os.path.dirname(__file__), 'localedata') - - -def exists(name): - """Check whether locale data is available for the given locale. Ther - return value is `True` if it exists, `False` otherwise. - - :param name: the locale identifier string - """ - if name in _cache: - return True - return os.path.exists(os.path.join(_dirname, '%s.dat' % name)) - - -def locale_identifiers(): - """Return a list of all locale identifiers for which locale data is - available. - - .. versionadded:: 0.8.1 - - :return: a list of locale identifiers (strings) - """ - return [stem for stem, extension in [ - os.path.splitext(filename) for filename in os.listdir(_dirname) - ] if extension == '.dat' and stem != 'root'] - - -def load(name, merge_inherited=True): - """Load the locale data for the given locale. - - The locale data is a dictionary that contains much of the data defined by - the Common Locale Data Repository (CLDR). This data is stored as a - collection of pickle files inside the ``babel`` package. - - >>> d = load('en_US') - >>> d['languages']['sv'] - u'Swedish' - - Note that the results are cached, and subsequent requests for the same - locale return the same dictionary: - - >>> d1 = load('en_US') - >>> d2 = load('en_US') - >>> d1 is d2 - True - - :param name: the locale identifier string (or "root") - :param merge_inherited: whether the inherited data should be merged into - the data of the requested locale - :raise `IOError`: if no locale data file is found for the given locale - identifer, or one of the locales it inherits from - """ - _cache_lock.acquire() - try: - data = _cache.get(name) - if not data: - # Load inherited data - if name == 'root' or not merge_inherited: - data = {} - else: - parts = name.split('_') - if len(parts) == 1: - parent = 'root' - else: - parent = '_'.join(parts[:-1]) - data = load(parent).copy() - filename = os.path.join(_dirname, '%s.dat' % name) - fileobj = open(filename, 'rb') - try: - if name != 'root' and merge_inherited: - merge(data, pickle.load(fileobj)) - else: - data = pickle.load(fileobj) - _cache[name] = data - finally: - fileobj.close() - return data - finally: - _cache_lock.release() - - -def merge(dict1, dict2): - """Merge the data from `dict2` into the `dict1` dictionary, making copies - of nested dictionaries. 
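[Editor's note, not part of the patch: localedata, deleted here, is the low-level loader behind the Locale class, which its own module docstring recommends as the more convenient interface. A short sketch of that higher-level access path against a separately installed Babel package follows; the exact strings returned depend on the installed CLDR data.]

    # Sketch only: Locale.parse() resolves and merges the same per-locale
    # .dat data that the deleted localedata module loads and caches.
    from babel import Locale
    from babel.localedata import exists, locale_identifiers

    locale = Locale.parse('de_DE')
    print(locale.languages['sv'])               # e.g. Schwedisch
    print(exists('en_US'))                      # True if locale data is available
    print('root' not in locale_identifiers())   # 'root' is filtered from the listing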
- - >>> d = {1: 'foo', 3: 'baz'} - >>> merge(d, {1: 'Foo', 2: 'Bar'}) - >>> items = d.items(); items.sort(); items - [(1, 'Foo'), (2, 'Bar'), (3, 'baz')] - - :param dict1: the dictionary to merge into - :param dict2: the dictionary containing the data that should be merged - """ - for key, val2 in dict2.items(): - if val2 is not None: - val1 = dict1.get(key) - if isinstance(val2, dict): - if val1 is None: - val1 = {} - if isinstance(val1, Alias): - val1 = (val1, val2) - elif isinstance(val1, tuple): - alias, others = val1 - others = others.copy() - merge(others, val2) - val1 = (alias, others) - else: - val1 = val1.copy() - merge(val1, val2) - else: - val1 = val2 - dict1[key] = val1 - - -class Alias(object): - """Representation of an alias in the locale data. - - An alias is a value that refers to some other part of the locale data, - as specified by the `keys`. - """ - - def __init__(self, keys): - self.keys = tuple(keys) - - def __repr__(self): - return '<%s %r>' % (type(self).__name__, self.keys) - - def resolve(self, data): - """Resolve the alias based on the given data. - - This is done recursively, so if one alias resolves to a second alias, - that second alias will also be resolved. - - :param data: the locale data - :type data: `dict` - """ - base = data - for key in self.keys: - data = data[key] - if isinstance(data, Alias): - data = data.resolve(base) - elif isinstance(data, tuple): - alias, others = data - data = alias.resolve(base) - return data - - -class LocaleDataDict(MutableMapping): - """Dictionary wrapper that automatically resolves aliases to the actual - values. - """ - - def __init__(self, data, base=None): - self._data = data - if base is None: - base = data - self.base = base - - def __len__(self): - return len(self._data) - - def __iter__(self): - return iter(self._data) - - def __getitem__(self, key): - orig = val = self._data[key] - if isinstance(val, Alias): # resolve an alias - val = val.resolve(self.base) - if isinstance(val, tuple): # Merge a partial dict with an alias - alias, others = val - val = alias.resolve(self.base).copy() - merge(val, others) - if type(val) is dict: # Return a nested alias-resolving dict - val = LocaleDataDict(val, base=self.base) - if val is not orig: - self._data[key] = val - return val - - def __setitem__(self, key, value): - self._data[key] = value - - def __delitem__(self, key): - del self._data[key] - - def copy(self): - return LocaleDataDict(self._data.copy(), base=self.base) diff --git a/vendor/babel/localedata/aa.dat b/vendor/babel/localedata/aa.dat deleted file mode 100644 index 7a5e4987..00000000 Binary files a/vendor/babel/localedata/aa.dat and /dev/null differ diff --git a/vendor/babel/localedata/aa_DJ.dat b/vendor/babel/localedata/aa_DJ.dat deleted file mode 100644 index ceafe155..00000000 Binary files a/vendor/babel/localedata/aa_DJ.dat and /dev/null differ diff --git a/vendor/babel/localedata/aa_ER.dat b/vendor/babel/localedata/aa_ER.dat deleted file mode 100644 index 61ff1648..00000000 Binary files a/vendor/babel/localedata/aa_ER.dat and /dev/null differ diff --git a/vendor/babel/localedata/aa_ET.dat b/vendor/babel/localedata/aa_ET.dat deleted file mode 100644 index 35690913..00000000 --- a/vendor/babel/localedata/aa_ET.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U 
-time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. \ No newline at end of file diff --git a/vendor/babel/localedata/af.dat b/vendor/babel/localedata/af.dat deleted file mode 100644 index b17f184a..00000000 Binary files a/vendor/babel/localedata/af.dat and /dev/null differ diff --git a/vendor/babel/localedata/af_NA.dat b/vendor/babel/localedata/af_NA.dat deleted file mode 100644 index c915eedc..00000000 Binary files a/vendor/babel/localedata/af_NA.dat and /dev/null differ diff --git a/vendor/babel/localedata/af_ZA.dat b/vendor/babel/localedata/af_ZA.dat deleted file mode 100644 index 35690913..00000000 --- a/vendor/babel/localedata/af_ZA.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. \ No newline at end of file diff --git a/vendor/babel/localedata/agq.dat b/vendor/babel/localedata/agq.dat deleted file mode 100644 index 0c57e93b..00000000 Binary files a/vendor/babel/localedata/agq.dat and /dev/null differ diff --git a/vendor/babel/localedata/agq_CM.dat b/vendor/babel/localedata/agq_CM.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/agq_CM.dat and /dev/null differ diff --git a/vendor/babel/localedata/ak.dat b/vendor/babel/localedata/ak.dat deleted file mode 100644 index fa10f57b..00000000 Binary files a/vendor/babel/localedata/ak.dat and /dev/null differ diff --git a/vendor/babel/localedata/ak_GH.dat b/vendor/babel/localedata/ak_GH.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/ak_GH.dat and /dev/null differ diff --git a/vendor/babel/localedata/am.dat b/vendor/babel/localedata/am.dat deleted file mode 100644 index 4bb060eb..00000000 Binary files a/vendor/babel/localedata/am.dat and /dev/null differ diff --git a/vendor/babel/localedata/am_ET.dat b/vendor/babel/localedata/am_ET.dat deleted file mode 100644 index 35690913..00000000 --- a/vendor/babel/localedata/am_ET.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/ar.dat b/vendor/babel/localedata/ar.dat deleted file mode 100644 index 79d92cc7..00000000 Binary files a/vendor/babel/localedata/ar.dat and /dev/null differ diff --git a/vendor/babel/localedata/ar_001.dat b/vendor/babel/localedata/ar_001.dat deleted file mode 100644 index 5b852b0b..00000000 Binary files a/vendor/babel/localedata/ar_001.dat and /dev/null differ diff --git a/vendor/babel/localedata/ar_AE.dat b/vendor/babel/localedata/ar_AE.dat deleted file mode 100644 index 72b45424..00000000 --- a/vendor/babel/localedata/ar_AE.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (U weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}q Uvariantsq!}q"Ucurrency_namesq#}q$U unit_patternsq%}q&u. \ No newline at end of file diff --git a/vendor/babel/localedata/ar_BH.dat b/vendor/babel/localedata/ar_BH.dat deleted file mode 100644 index 72b45424..00000000 --- a/vendor/babel/localedata/ar_BH.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (U weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}q Uvariantsq!}q"Ucurrency_namesq#}q$U unit_patternsq%}q&u. \ No newline at end of file diff --git a/vendor/babel/localedata/ar_DJ.dat b/vendor/babel/localedata/ar_DJ.dat deleted file mode 100644 index b0ee4421..00000000 Binary files a/vendor/babel/localedata/ar_DJ.dat and /dev/null differ diff --git a/vendor/babel/localedata/ar_DZ.dat b/vendor/babel/localedata/ar_DZ.dat deleted file mode 100644 index bf61a6fb..00000000 Binary files a/vendor/babel/localedata/ar_DZ.dat and /dev/null differ diff --git a/vendor/babel/localedata/ar_EG.dat b/vendor/babel/localedata/ar_EG.dat deleted file mode 100644 index 01c29af9..00000000 --- a/vendor/babel/localedata/ar_EG.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/ar_EH.dat b/vendor/babel/localedata/ar_EH.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/ar_EH.dat and /dev/null differ diff --git a/vendor/babel/localedata/ar_ER.dat b/vendor/babel/localedata/ar_ER.dat deleted file mode 100644 index 61ff1648..00000000 Binary files a/vendor/babel/localedata/ar_ER.dat and /dev/null differ diff --git a/vendor/babel/localedata/ar_IL.dat b/vendor/babel/localedata/ar_IL.dat deleted file mode 100644 index 36bd112d..00000000 --- a/vendor/babel/localedata/ar_IL.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (U weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}q Uvariantsq!}q"Ucurrency_namesq#}q$U unit_patternsq%}q&u. \ No newline at end of file diff --git a/vendor/babel/localedata/ar_IQ.dat b/vendor/babel/localedata/ar_IQ.dat deleted file mode 100644 index 882435e4..00000000 Binary files a/vendor/babel/localedata/ar_IQ.dat and /dev/null differ diff --git a/vendor/babel/localedata/ar_JO.dat b/vendor/babel/localedata/ar_JO.dat deleted file mode 100644 index 563ecb41..00000000 Binary files a/vendor/babel/localedata/ar_JO.dat and /dev/null differ diff --git a/vendor/babel/localedata/ar_KM.dat b/vendor/babel/localedata/ar_KM.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/ar_KM.dat and /dev/null differ diff --git a/vendor/babel/localedata/ar_KW.dat b/vendor/babel/localedata/ar_KW.dat deleted file mode 100644 index 72b45424..00000000 --- a/vendor/babel/localedata/ar_KW.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (U weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}q Uvariantsq!}q"Ucurrency_namesq#}q$U unit_patternsq%}q&u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/ar_LB.dat b/vendor/babel/localedata/ar_LB.dat deleted file mode 100644 index 369f5368..00000000 Binary files a/vendor/babel/localedata/ar_LB.dat and /dev/null differ diff --git a/vendor/babel/localedata/ar_LY.dat b/vendor/babel/localedata/ar_LY.dat deleted file mode 100644 index dc63b518..00000000 Binary files a/vendor/babel/localedata/ar_LY.dat and /dev/null differ diff --git a/vendor/babel/localedata/ar_MA.dat b/vendor/babel/localedata/ar_MA.dat deleted file mode 100644 index 741e7913..00000000 Binary files a/vendor/babel/localedata/ar_MA.dat and /dev/null differ diff --git a/vendor/babel/localedata/ar_MR.dat b/vendor/babel/localedata/ar_MR.dat deleted file mode 100644 index 6ea05710..00000000 Binary files a/vendor/babel/localedata/ar_MR.dat and /dev/null differ diff --git a/vendor/babel/localedata/ar_OM.dat b/vendor/babel/localedata/ar_OM.dat deleted file mode 100644 index 150c7e3b..00000000 --- a/vendor/babel/localedata/ar_OM.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (U weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}q Uvariantsq!}q"Ucurrency_namesq#}q$U unit_patternsq%}q&u. \ No newline at end of file diff --git a/vendor/babel/localedata/ar_PS.dat b/vendor/babel/localedata/ar_PS.dat deleted file mode 100644 index 889cfe8c..00000000 Binary files a/vendor/babel/localedata/ar_PS.dat and /dev/null differ diff --git a/vendor/babel/localedata/ar_QA.dat b/vendor/babel/localedata/ar_QA.dat deleted file mode 100644 index c63c35df..00000000 Binary files a/vendor/babel/localedata/ar_QA.dat and /dev/null differ diff --git a/vendor/babel/localedata/ar_SA.dat b/vendor/babel/localedata/ar_SA.dat deleted file mode 100644 index 052e7456..00000000 Binary files a/vendor/babel/localedata/ar_SA.dat and /dev/null differ diff --git a/vendor/babel/localedata/ar_SD.dat b/vendor/babel/localedata/ar_SD.dat deleted file mode 100644 index 01c29af9..00000000 --- a/vendor/babel/localedata/ar_SD.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/ar_SO.dat b/vendor/babel/localedata/ar_SO.dat deleted file mode 100644 index 39a66503..00000000 Binary files a/vendor/babel/localedata/ar_SO.dat and /dev/null differ diff --git a/vendor/babel/localedata/ar_SY.dat b/vendor/babel/localedata/ar_SY.dat deleted file mode 100644 index 96f96b03..00000000 Binary files a/vendor/babel/localedata/ar_SY.dat and /dev/null differ diff --git a/vendor/babel/localedata/ar_TD.dat b/vendor/babel/localedata/ar_TD.dat deleted file mode 100644 index a7cee6f2..00000000 --- a/vendor/babel/localedata/ar_TD.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}qUcurrency_namesq }q!U unit_patternsq"}q#u. \ No newline at end of file diff --git a/vendor/babel/localedata/ar_TN.dat b/vendor/babel/localedata/ar_TN.dat deleted file mode 100644 index 5144a9c7..00000000 Binary files a/vendor/babel/localedata/ar_TN.dat and /dev/null differ diff --git a/vendor/babel/localedata/ar_YE.dat b/vendor/babel/localedata/ar_YE.dat deleted file mode 100644 index 052e7456..00000000 Binary files a/vendor/babel/localedata/ar_YE.dat and /dev/null differ diff --git a/vendor/babel/localedata/as.dat b/vendor/babel/localedata/as.dat deleted file mode 100644 index ecf4a3fb..00000000 Binary files a/vendor/babel/localedata/as.dat and /dev/null differ diff --git a/vendor/babel/localedata/as_IN.dat b/vendor/babel/localedata/as_IN.dat deleted file mode 100644 index 1564619c..00000000 --- a/vendor/babel/localedata/as_IN.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (U weekend_startqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/asa.dat b/vendor/babel/localedata/asa.dat deleted file mode 100644 index 599f6193..00000000 Binary files a/vendor/babel/localedata/asa.dat and /dev/null differ diff --git a/vendor/babel/localedata/asa_TZ.dat b/vendor/babel/localedata/asa_TZ.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/asa_TZ.dat and /dev/null differ diff --git a/vendor/babel/localedata/ast.dat b/vendor/babel/localedata/ast.dat deleted file mode 100644 index b3544d0e..00000000 Binary files a/vendor/babel/localedata/ast.dat and /dev/null differ diff --git a/vendor/babel/localedata/ast_ES.dat b/vendor/babel/localedata/ast_ES.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/ast_ES.dat and /dev/null differ diff --git a/vendor/babel/localedata/az.dat b/vendor/babel/localedata/az.dat deleted file mode 100644 index c31ec593..00000000 Binary files a/vendor/babel/localedata/az.dat and /dev/null differ diff --git a/vendor/babel/localedata/az_Cyrl.dat b/vendor/babel/localedata/az_Cyrl.dat deleted file mode 100644 index 1d74d11b..00000000 Binary files a/vendor/babel/localedata/az_Cyrl.dat and /dev/null differ diff --git a/vendor/babel/localedata/az_Cyrl_AZ.dat b/vendor/babel/localedata/az_Cyrl_AZ.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/az_Cyrl_AZ.dat and /dev/null differ diff --git a/vendor/babel/localedata/az_Latn.dat b/vendor/babel/localedata/az_Latn.dat deleted file mode 100644 index 980ab6b6..00000000 Binary files a/vendor/babel/localedata/az_Latn.dat and /dev/null differ diff --git a/vendor/babel/localedata/az_Latn_AZ.dat b/vendor/babel/localedata/az_Latn_AZ.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/az_Latn_AZ.dat and /dev/null differ diff --git a/vendor/babel/localedata/bas.dat b/vendor/babel/localedata/bas.dat deleted file mode 100644 index fe53dafc..00000000 Binary files a/vendor/babel/localedata/bas.dat and /dev/null differ diff --git a/vendor/babel/localedata/bas_CM.dat b/vendor/babel/localedata/bas_CM.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/bas_CM.dat and /dev/null differ diff --git a/vendor/babel/localedata/be.dat b/vendor/babel/localedata/be.dat deleted file mode 100644 index 00270ce9..00000000 Binary files a/vendor/babel/localedata/be.dat and /dev/null differ diff --git a/vendor/babel/localedata/be_BY.dat b/vendor/babel/localedata/be_BY.dat deleted file mode 100644 index 81c89254..00000000 --- a/vendor/babel/localedata/be_BY.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U first_dayqKsU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}q Ucurrency_namesq!}q"U unit_patternsq#}q$u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/bem.dat b/vendor/babel/localedata/bem.dat deleted file mode 100644 index 65713a7c..00000000 Binary files a/vendor/babel/localedata/bem.dat and /dev/null differ diff --git a/vendor/babel/localedata/bem_ZM.dat b/vendor/babel/localedata/bem_ZM.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/bem_ZM.dat and /dev/null differ diff --git a/vendor/babel/localedata/bez.dat b/vendor/babel/localedata/bez.dat deleted file mode 100644 index aa7bfc25..00000000 Binary files a/vendor/babel/localedata/bez.dat and /dev/null differ diff --git a/vendor/babel/localedata/bez_TZ.dat b/vendor/babel/localedata/bez_TZ.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/bez_TZ.dat and /dev/null differ diff --git a/vendor/babel/localedata/bg.dat b/vendor/babel/localedata/bg.dat deleted file mode 100644 index 35d43429..00000000 Binary files a/vendor/babel/localedata/bg.dat and /dev/null differ diff --git a/vendor/babel/localedata/bg_BG.dat b/vendor/babel/localedata/bg_BG.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/bg_BG.dat and /dev/null differ diff --git a/vendor/babel/localedata/bm.dat b/vendor/babel/localedata/bm.dat deleted file mode 100644 index 9f5202cd..00000000 Binary files a/vendor/babel/localedata/bm.dat and /dev/null differ diff --git a/vendor/babel/localedata/bm_ML.dat b/vendor/babel/localedata/bm_ML.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/bm_ML.dat and /dev/null differ diff --git a/vendor/babel/localedata/bn.dat b/vendor/babel/localedata/bn.dat deleted file mode 100644 index c5be3e56..00000000 Binary files a/vendor/babel/localedata/bn.dat and /dev/null differ diff --git a/vendor/babel/localedata/bn_BD.dat b/vendor/babel/localedata/bn_BD.dat deleted file mode 100644 index 281a7813..00000000 --- a/vendor/babel/localedata/bn_BD.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U first_dayqKsU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}q Ucurrency_namesq!}q"U unit_patternsq#}q$u. \ No newline at end of file diff --git a/vendor/babel/localedata/bn_IN.dat b/vendor/babel/localedata/bn_IN.dat deleted file mode 100644 index 07613a58..00000000 Binary files a/vendor/babel/localedata/bn_IN.dat and /dev/null differ diff --git a/vendor/babel/localedata/bo.dat b/vendor/babel/localedata/bo.dat deleted file mode 100644 index bcfa2184..00000000 Binary files a/vendor/babel/localedata/bo.dat and /dev/null differ diff --git a/vendor/babel/localedata/bo_CN.dat b/vendor/babel/localedata/bo_CN.dat deleted file mode 100644 index 81c89254..00000000 --- a/vendor/babel/localedata/bo_CN.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U first_dayqKsU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}q Ucurrency_namesq!}q"U unit_patternsq#}q$u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/bo_IN.dat b/vendor/babel/localedata/bo_IN.dat deleted file mode 100644 index fbda3e34..00000000 Binary files a/vendor/babel/localedata/bo_IN.dat and /dev/null differ diff --git a/vendor/babel/localedata/br.dat b/vendor/babel/localedata/br.dat deleted file mode 100644 index 2c2bc2cc..00000000 Binary files a/vendor/babel/localedata/br.dat and /dev/null differ diff --git a/vendor/babel/localedata/br_FR.dat b/vendor/babel/localedata/br_FR.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/br_FR.dat and /dev/null differ diff --git a/vendor/babel/localedata/brx.dat b/vendor/babel/localedata/brx.dat deleted file mode 100644 index aaa57e92..00000000 Binary files a/vendor/babel/localedata/brx.dat and /dev/null differ diff --git a/vendor/babel/localedata/brx_IN.dat b/vendor/babel/localedata/brx_IN.dat deleted file mode 100644 index 1564619c..00000000 --- a/vendor/babel/localedata/brx_IN.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (U weekend_startqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u. \ No newline at end of file diff --git a/vendor/babel/localedata/bs.dat b/vendor/babel/localedata/bs.dat deleted file mode 100644 index 82abeed2..00000000 Binary files a/vendor/babel/localedata/bs.dat and /dev/null differ diff --git a/vendor/babel/localedata/bs_Cyrl.dat b/vendor/babel/localedata/bs_Cyrl.dat deleted file mode 100644 index f152d9de..00000000 Binary files a/vendor/babel/localedata/bs_Cyrl.dat and /dev/null differ diff --git a/vendor/babel/localedata/bs_Cyrl_BA.dat b/vendor/babel/localedata/bs_Cyrl_BA.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/bs_Cyrl_BA.dat and /dev/null differ diff --git a/vendor/babel/localedata/bs_Latn.dat b/vendor/babel/localedata/bs_Latn.dat deleted file mode 100644 index a882c5bc..00000000 Binary files a/vendor/babel/localedata/bs_Latn.dat and /dev/null differ diff --git a/vendor/babel/localedata/bs_Latn_BA.dat b/vendor/babel/localedata/bs_Latn_BA.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/bs_Latn_BA.dat and /dev/null differ diff --git a/vendor/babel/localedata/byn.dat b/vendor/babel/localedata/byn.dat deleted file mode 100644 index 6cbcf5a4..00000000 Binary files a/vendor/babel/localedata/byn.dat and /dev/null differ diff --git a/vendor/babel/localedata/byn_ER.dat b/vendor/babel/localedata/byn_ER.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/byn_ER.dat and /dev/null differ diff --git a/vendor/babel/localedata/ca.dat b/vendor/babel/localedata/ca.dat deleted file mode 100644 index 82aa8738..00000000 Binary files a/vendor/babel/localedata/ca.dat and /dev/null differ diff --git a/vendor/babel/localedata/ca_AD.dat b/vendor/babel/localedata/ca_AD.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/ca_AD.dat and /dev/null differ diff --git a/vendor/babel/localedata/ca_ES.dat b/vendor/babel/localedata/ca_ES.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/ca_ES.dat and /dev/null differ diff --git a/vendor/babel/localedata/cgg.dat b/vendor/babel/localedata/cgg.dat 
deleted file mode 100644 index 7550fa75..00000000 Binary files a/vendor/babel/localedata/cgg.dat and /dev/null differ diff --git a/vendor/babel/localedata/cgg_UG.dat b/vendor/babel/localedata/cgg_UG.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/cgg_UG.dat and /dev/null differ diff --git a/vendor/babel/localedata/chr.dat b/vendor/babel/localedata/chr.dat deleted file mode 100644 index 65dee5bb..00000000 Binary files a/vendor/babel/localedata/chr.dat and /dev/null differ diff --git a/vendor/babel/localedata/chr_US.dat b/vendor/babel/localedata/chr_US.dat deleted file mode 100644 index e1639abe..00000000 --- a/vendor/babel/localedata/chr_US.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u. \ No newline at end of file diff --git a/vendor/babel/localedata/cs.dat b/vendor/babel/localedata/cs.dat deleted file mode 100644 index f85d6ade..00000000 Binary files a/vendor/babel/localedata/cs.dat and /dev/null differ diff --git a/vendor/babel/localedata/cs_CZ.dat b/vendor/babel/localedata/cs_CZ.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/cs_CZ.dat and /dev/null differ diff --git a/vendor/babel/localedata/cy.dat b/vendor/babel/localedata/cy.dat deleted file mode 100644 index c6ec58fe..00000000 Binary files a/vendor/babel/localedata/cy.dat and /dev/null differ diff --git a/vendor/babel/localedata/cy_GB.dat b/vendor/babel/localedata/cy_GB.dat deleted file mode 100644 index 401708ff..00000000 --- a/vendor/babel/localedata/cy_GB.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u. \ No newline at end of file diff --git a/vendor/babel/localedata/da.dat b/vendor/babel/localedata/da.dat deleted file mode 100644 index 47548dd0..00000000 Binary files a/vendor/babel/localedata/da.dat and /dev/null differ diff --git a/vendor/babel/localedata/da_DK.dat b/vendor/babel/localedata/da_DK.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/da_DK.dat and /dev/null differ diff --git a/vendor/babel/localedata/dav.dat b/vendor/babel/localedata/dav.dat deleted file mode 100644 index cc17bc0b..00000000 Binary files a/vendor/babel/localedata/dav.dat and /dev/null differ diff --git a/vendor/babel/localedata/dav_KE.dat b/vendor/babel/localedata/dav_KE.dat deleted file mode 100644 index 35690913..00000000 --- a/vendor/babel/localedata/dav_KE.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/de.dat b/vendor/babel/localedata/de.dat deleted file mode 100644 index ba55b507..00000000 Binary files a/vendor/babel/localedata/de.dat and /dev/null differ diff --git a/vendor/babel/localedata/de_AT.dat b/vendor/babel/localedata/de_AT.dat deleted file mode 100644 index bee43e31..00000000 Binary files a/vendor/babel/localedata/de_AT.dat and /dev/null differ diff --git a/vendor/babel/localedata/de_BE.dat b/vendor/babel/localedata/de_BE.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/de_BE.dat and /dev/null differ diff --git a/vendor/babel/localedata/de_CH.dat b/vendor/babel/localedata/de_CH.dat deleted file mode 100644 index a4d26be1..00000000 Binary files a/vendor/babel/localedata/de_CH.dat and /dev/null differ diff --git a/vendor/babel/localedata/de_DE.dat b/vendor/babel/localedata/de_DE.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/de_DE.dat and /dev/null differ diff --git a/vendor/babel/localedata/de_LI.dat b/vendor/babel/localedata/de_LI.dat deleted file mode 100644 index 2b92c9b0..00000000 Binary files a/vendor/babel/localedata/de_LI.dat and /dev/null differ diff --git a/vendor/babel/localedata/de_LU.dat b/vendor/babel/localedata/de_LU.dat deleted file mode 100644 index 8d080e12..00000000 Binary files a/vendor/babel/localedata/de_LU.dat and /dev/null differ diff --git a/vendor/babel/localedata/dje.dat b/vendor/babel/localedata/dje.dat deleted file mode 100644 index b3997c75..00000000 Binary files a/vendor/babel/localedata/dje.dat and /dev/null differ diff --git a/vendor/babel/localedata/dje_NE.dat b/vendor/babel/localedata/dje_NE.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/dje_NE.dat and /dev/null differ diff --git a/vendor/babel/localedata/dua.dat b/vendor/babel/localedata/dua.dat deleted file mode 100644 index 2693dc17..00000000 Binary files a/vendor/babel/localedata/dua.dat and /dev/null differ diff --git a/vendor/babel/localedata/dua_CM.dat b/vendor/babel/localedata/dua_CM.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/dua_CM.dat and /dev/null differ diff --git a/vendor/babel/localedata/dyo.dat b/vendor/babel/localedata/dyo.dat deleted file mode 100644 index 9bd62330..00000000 Binary files a/vendor/babel/localedata/dyo.dat and /dev/null differ diff --git a/vendor/babel/localedata/dyo_SN.dat b/vendor/babel/localedata/dyo_SN.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/dyo_SN.dat and /dev/null differ diff --git a/vendor/babel/localedata/dz.dat b/vendor/babel/localedata/dz.dat deleted file mode 100644 index 0ba26a88..00000000 Binary files a/vendor/babel/localedata/dz.dat and /dev/null differ diff --git a/vendor/babel/localedata/dz_BT.dat b/vendor/babel/localedata/dz_BT.dat deleted file mode 100644 index 81c89254..00000000 --- a/vendor/babel/localedata/dz_BT.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U first_dayqKsU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}q Ucurrency_namesq!}q"U unit_patternsq#}q$u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/ebu.dat b/vendor/babel/localedata/ebu.dat deleted file mode 100644 index 2ce0ac43..00000000 Binary files a/vendor/babel/localedata/ebu.dat and /dev/null differ diff --git a/vendor/babel/localedata/ebu_KE.dat b/vendor/babel/localedata/ebu_KE.dat deleted file mode 100644 index 35690913..00000000 --- a/vendor/babel/localedata/ebu_KE.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. \ No newline at end of file diff --git a/vendor/babel/localedata/ee.dat b/vendor/babel/localedata/ee.dat deleted file mode 100644 index 28c27433..00000000 Binary files a/vendor/babel/localedata/ee.dat and /dev/null differ diff --git a/vendor/babel/localedata/ee_GH.dat b/vendor/babel/localedata/ee_GH.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/ee_GH.dat and /dev/null differ diff --git a/vendor/babel/localedata/ee_TG.dat b/vendor/babel/localedata/ee_TG.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/ee_TG.dat and /dev/null differ diff --git a/vendor/babel/localedata/el.dat b/vendor/babel/localedata/el.dat deleted file mode 100644 index 9c10ce51..00000000 Binary files a/vendor/babel/localedata/el.dat and /dev/null differ diff --git a/vendor/babel/localedata/el_CY.dat b/vendor/babel/localedata/el_CY.dat deleted file mode 100644 index 83f7d854..00000000 Binary files a/vendor/babel/localedata/el_CY.dat and /dev/null differ diff --git a/vendor/babel/localedata/el_GR.dat b/vendor/babel/localedata/el_GR.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/el_GR.dat and /dev/null differ diff --git a/vendor/babel/localedata/en.dat b/vendor/babel/localedata/en.dat deleted file mode 100644 index cbe140ca..00000000 Binary files a/vendor/babel/localedata/en.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_150.dat b/vendor/babel/localedata/en_150.dat deleted file mode 100644 index 23a79939..00000000 Binary files a/vendor/babel/localedata/en_150.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_AG.dat b/vendor/babel/localedata/en_AG.dat deleted file mode 100644 index c6f15402..00000000 Binary files a/vendor/babel/localedata/en_AG.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_AS.dat b/vendor/babel/localedata/en_AS.dat deleted file mode 100644 index 35690913..00000000 --- a/vendor/babel/localedata/en_AS.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/en_AU.dat b/vendor/babel/localedata/en_AU.dat deleted file mode 100644 index 14472b36..00000000 Binary files a/vendor/babel/localedata/en_AU.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_BB.dat b/vendor/babel/localedata/en_BB.dat deleted file mode 100644 index 40f7b970..00000000 Binary files a/vendor/babel/localedata/en_BB.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_BE.dat b/vendor/babel/localedata/en_BE.dat deleted file mode 100644 index 89586c28..00000000 Binary files a/vendor/babel/localedata/en_BE.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_BM.dat b/vendor/babel/localedata/en_BM.dat deleted file mode 100644 index cf27a79b..00000000 Binary files a/vendor/babel/localedata/en_BM.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_BS.dat b/vendor/babel/localedata/en_BS.dat deleted file mode 100644 index 9f24a114..00000000 Binary files a/vendor/babel/localedata/en_BS.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_BW.dat b/vendor/babel/localedata/en_BW.dat deleted file mode 100644 index a27e49f6..00000000 Binary files a/vendor/babel/localedata/en_BW.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_BZ.dat b/vendor/babel/localedata/en_BZ.dat deleted file mode 100644 index de2936c1..00000000 Binary files a/vendor/babel/localedata/en_BZ.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_CA.dat b/vendor/babel/localedata/en_CA.dat deleted file mode 100644 index f46ef188..00000000 Binary files a/vendor/babel/localedata/en_CA.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_CM.dat b/vendor/babel/localedata/en_CM.dat deleted file mode 100644 index ec06b5e9..00000000 Binary files a/vendor/babel/localedata/en_CM.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_DM.dat b/vendor/babel/localedata/en_DM.dat deleted file mode 100644 index c6f15402..00000000 Binary files a/vendor/babel/localedata/en_DM.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_Dsrt.dat b/vendor/babel/localedata/en_Dsrt.dat deleted file mode 100644 index c9b5e98d..00000000 Binary files a/vendor/babel/localedata/en_Dsrt.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_Dsrt_US.dat b/vendor/babel/localedata/en_Dsrt_US.dat deleted file mode 100644 index e1639abe..00000000 --- a/vendor/babel/localedata/en_Dsrt_US.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/en_FJ.dat b/vendor/babel/localedata/en_FJ.dat deleted file mode 100644 index 33e22056..00000000 Binary files a/vendor/babel/localedata/en_FJ.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_FM.dat b/vendor/babel/localedata/en_FM.dat deleted file mode 100644 index b2c2947d..00000000 Binary files a/vendor/babel/localedata/en_FM.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_GB.dat b/vendor/babel/localedata/en_GB.dat deleted file mode 100644 index 2baebd09..00000000 Binary files a/vendor/babel/localedata/en_GB.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_GD.dat b/vendor/babel/localedata/en_GD.dat deleted file mode 100644 index 049f10a1..00000000 Binary files a/vendor/babel/localedata/en_GD.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_GG.dat b/vendor/babel/localedata/en_GG.dat deleted file mode 100644 index a7cee6f2..00000000 --- a/vendor/babel/localedata/en_GG.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}qUcurrency_namesq }q!U unit_patternsq"}q#u. \ No newline at end of file diff --git a/vendor/babel/localedata/en_GH.dat b/vendor/babel/localedata/en_GH.dat deleted file mode 100644 index dceb36d4..00000000 Binary files a/vendor/babel/localedata/en_GH.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_GI.dat b/vendor/babel/localedata/en_GI.dat deleted file mode 100644 index 33a2e60a..00000000 Binary files a/vendor/babel/localedata/en_GI.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_GM.dat b/vendor/babel/localedata/en_GM.dat deleted file mode 100644 index a7c4e455..00000000 Binary files a/vendor/babel/localedata/en_GM.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_GU.dat b/vendor/babel/localedata/en_GU.dat deleted file mode 100644 index b022c614..00000000 Binary files a/vendor/babel/localedata/en_GU.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_GY.dat b/vendor/babel/localedata/en_GY.dat deleted file mode 100644 index 19a886ca..00000000 Binary files a/vendor/babel/localedata/en_GY.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_HK.dat b/vendor/babel/localedata/en_HK.dat deleted file mode 100644 index a62e0137..00000000 Binary files a/vendor/babel/localedata/en_HK.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_IE.dat b/vendor/babel/localedata/en_IE.dat deleted file mode 100644 index e4bd14ef..00000000 Binary files a/vendor/babel/localedata/en_IE.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_IM.dat b/vendor/babel/localedata/en_IM.dat deleted file mode 100644 index a7cee6f2..00000000 --- a/vendor/babel/localedata/en_IM.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}qUcurrency_namesq }q!U unit_patternsq"}q#u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/en_IN.dat b/vendor/babel/localedata/en_IN.dat deleted file mode 100644 index cea4cae9..00000000 Binary files a/vendor/babel/localedata/en_IN.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_JE.dat b/vendor/babel/localedata/en_JE.dat deleted file mode 100644 index a7cee6f2..00000000 --- a/vendor/babel/localedata/en_JE.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}qUcurrency_namesq }q!U unit_patternsq"}q#u. \ No newline at end of file diff --git a/vendor/babel/localedata/en_JM.dat b/vendor/babel/localedata/en_JM.dat deleted file mode 100644 index e792cb93..00000000 Binary files a/vendor/babel/localedata/en_JM.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_KE.dat b/vendor/babel/localedata/en_KE.dat deleted file mode 100644 index f1dc0e5d..00000000 Binary files a/vendor/babel/localedata/en_KE.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_KI.dat b/vendor/babel/localedata/en_KI.dat deleted file mode 100644 index b87da16d..00000000 Binary files a/vendor/babel/localedata/en_KI.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_KN.dat b/vendor/babel/localedata/en_KN.dat deleted file mode 100644 index 049f10a1..00000000 Binary files a/vendor/babel/localedata/en_KN.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_KY.dat b/vendor/babel/localedata/en_KY.dat deleted file mode 100644 index a6415f49..00000000 Binary files a/vendor/babel/localedata/en_KY.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_LC.dat b/vendor/babel/localedata/en_LC.dat deleted file mode 100644 index 049f10a1..00000000 Binary files a/vendor/babel/localedata/en_LC.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_LR.dat b/vendor/babel/localedata/en_LR.dat deleted file mode 100644 index e92eac71..00000000 Binary files a/vendor/babel/localedata/en_LR.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_LS.dat b/vendor/babel/localedata/en_LS.dat deleted file mode 100644 index 76aab995..00000000 Binary files a/vendor/babel/localedata/en_LS.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_MG.dat b/vendor/babel/localedata/en_MG.dat deleted file mode 100644 index 25efe8a0..00000000 Binary files a/vendor/babel/localedata/en_MG.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_MH.dat b/vendor/babel/localedata/en_MH.dat deleted file mode 100644 index 2a51e5ab..00000000 Binary files a/vendor/babel/localedata/en_MH.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_MP.dat b/vendor/babel/localedata/en_MP.dat deleted file mode 100644 index b2c2947d..00000000 Binary files a/vendor/babel/localedata/en_MP.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_MT.dat b/vendor/babel/localedata/en_MT.dat deleted file mode 100644 index d68588a1..00000000 Binary files a/vendor/babel/localedata/en_MT.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_MU.dat b/vendor/babel/localedata/en_MU.dat deleted file mode 100644 index 6434affc..00000000 Binary files a/vendor/babel/localedata/en_MU.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_MW.dat b/vendor/babel/localedata/en_MW.dat deleted file mode 100644 index 
2b728133..00000000 Binary files a/vendor/babel/localedata/en_MW.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_NA.dat b/vendor/babel/localedata/en_NA.dat deleted file mode 100644 index ab7159fb..00000000 Binary files a/vendor/babel/localedata/en_NA.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_NG.dat b/vendor/babel/localedata/en_NG.dat deleted file mode 100644 index ca4464e6..00000000 Binary files a/vendor/babel/localedata/en_NG.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_NZ.dat b/vendor/babel/localedata/en_NZ.dat deleted file mode 100644 index 1e8d8df3..00000000 Binary files a/vendor/babel/localedata/en_NZ.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_PG.dat b/vendor/babel/localedata/en_PG.dat deleted file mode 100644 index 57dc8c55..00000000 Binary files a/vendor/babel/localedata/en_PG.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_PH.dat b/vendor/babel/localedata/en_PH.dat deleted file mode 100644 index edf7178f..00000000 Binary files a/vendor/babel/localedata/en_PH.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_PK.dat b/vendor/babel/localedata/en_PK.dat deleted file mode 100644 index 40744ea8..00000000 Binary files a/vendor/babel/localedata/en_PK.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_PR.dat b/vendor/babel/localedata/en_PR.dat deleted file mode 100644 index 81c89254..00000000 --- a/vendor/babel/localedata/en_PR.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U first_dayqKsU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}q Ucurrency_namesq!}q"U unit_patternsq#}q$u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/en_PW.dat b/vendor/babel/localedata/en_PW.dat deleted file mode 100644 index b2c2947d..00000000 Binary files a/vendor/babel/localedata/en_PW.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_SB.dat b/vendor/babel/localedata/en_SB.dat deleted file mode 100644 index eaab8887..00000000 Binary files a/vendor/babel/localedata/en_SB.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_SC.dat b/vendor/babel/localedata/en_SC.dat deleted file mode 100644 index 355ec817..00000000 Binary files a/vendor/babel/localedata/en_SC.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_SG.dat b/vendor/babel/localedata/en_SG.dat deleted file mode 100644 index d12d553c..00000000 Binary files a/vendor/babel/localedata/en_SG.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_SL.dat b/vendor/babel/localedata/en_SL.dat deleted file mode 100644 index a9b33f38..00000000 Binary files a/vendor/babel/localedata/en_SL.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_SS.dat b/vendor/babel/localedata/en_SS.dat deleted file mode 100644 index 98b33925..00000000 Binary files a/vendor/babel/localedata/en_SS.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_SZ.dat b/vendor/babel/localedata/en_SZ.dat deleted file mode 100644 index 2bd0229d..00000000 Binary files a/vendor/babel/localedata/en_SZ.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_TC.dat b/vendor/babel/localedata/en_TC.dat deleted file mode 100644 index 59cbead6..00000000 Binary files a/vendor/babel/localedata/en_TC.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_TO.dat b/vendor/babel/localedata/en_TO.dat deleted file mode 100644 index 5f95a001..00000000 Binary files a/vendor/babel/localedata/en_TO.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_TT.dat b/vendor/babel/localedata/en_TT.dat deleted file mode 100644 index fa9a6ce1..00000000 Binary files a/vendor/babel/localedata/en_TT.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_TZ.dat b/vendor/babel/localedata/en_TZ.dat deleted file mode 100644 index 26922be1..00000000 Binary files a/vendor/babel/localedata/en_TZ.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_UG.dat b/vendor/babel/localedata/en_UG.dat deleted file mode 100644 index c41cbbf0..00000000 Binary files a/vendor/babel/localedata/en_UG.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_UM.dat b/vendor/babel/localedata/en_UM.dat deleted file mode 100644 index 35690913..00000000 --- a/vendor/babel/localedata/en_UM.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/en_US.dat b/vendor/babel/localedata/en_US.dat deleted file mode 100644 index e1639abe..00000000 --- a/vendor/babel/localedata/en_US.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u. \ No newline at end of file diff --git a/vendor/babel/localedata/en_US_POSIX.dat b/vendor/babel/localedata/en_US_POSIX.dat deleted file mode 100644 index 09cbe7ec..00000000 Binary files a/vendor/babel/localedata/en_US_POSIX.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_VC.dat b/vendor/babel/localedata/en_VC.dat deleted file mode 100644 index 049f10a1..00000000 Binary files a/vendor/babel/localedata/en_VC.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_VG.dat b/vendor/babel/localedata/en_VG.dat deleted file mode 100644 index 59cbead6..00000000 Binary files a/vendor/babel/localedata/en_VG.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_VI.dat b/vendor/babel/localedata/en_VI.dat deleted file mode 100644 index e1639abe..00000000 --- a/vendor/babel/localedata/en_VI.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/en_VU.dat b/vendor/babel/localedata/en_VU.dat deleted file mode 100644 index bf189111..00000000 Binary files a/vendor/babel/localedata/en_VU.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_WS.dat b/vendor/babel/localedata/en_WS.dat deleted file mode 100644 index d9693cc4..00000000 Binary files a/vendor/babel/localedata/en_WS.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_ZA.dat b/vendor/babel/localedata/en_ZA.dat deleted file mode 100644 index c66ff2ba..00000000 Binary files a/vendor/babel/localedata/en_ZA.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_ZM.dat b/vendor/babel/localedata/en_ZM.dat deleted file mode 100644 index 6faaea85..00000000 Binary files a/vendor/babel/localedata/en_ZM.dat and /dev/null differ diff --git a/vendor/babel/localedata/en_ZW.dat b/vendor/babel/localedata/en_ZW.dat deleted file mode 100644 index 97fee561..00000000 Binary files a/vendor/babel/localedata/en_ZW.dat and /dev/null differ diff --git a/vendor/babel/localedata/eo.dat b/vendor/babel/localedata/eo.dat deleted file mode 100644 index 452c8491..00000000 Binary files a/vendor/babel/localedata/eo.dat and /dev/null differ diff --git a/vendor/babel/localedata/es.dat b/vendor/babel/localedata/es.dat deleted file mode 100644 index a48af1bc..00000000 Binary files a/vendor/babel/localedata/es.dat and /dev/null differ diff --git a/vendor/babel/localedata/es_419.dat b/vendor/babel/localedata/es_419.dat deleted file mode 100644 index b3a15f1b..00000000 Binary files a/vendor/babel/localedata/es_419.dat and /dev/null differ diff --git a/vendor/babel/localedata/es_AR.dat b/vendor/babel/localedata/es_AR.dat deleted file mode 100644 index 7b21f4fc..00000000 Binary files a/vendor/babel/localedata/es_AR.dat and /dev/null differ diff --git a/vendor/babel/localedata/es_BO.dat b/vendor/babel/localedata/es_BO.dat deleted file mode 100644 index 438c3600..00000000 Binary files a/vendor/babel/localedata/es_BO.dat and /dev/null differ diff --git a/vendor/babel/localedata/es_CL.dat b/vendor/babel/localedata/es_CL.dat deleted file mode 100644 index e11cc51d..00000000 Binary files a/vendor/babel/localedata/es_CL.dat and /dev/null differ diff --git a/vendor/babel/localedata/es_CO.dat b/vendor/babel/localedata/es_CO.dat deleted file mode 100644 index dd043336..00000000 Binary files a/vendor/babel/localedata/es_CO.dat and /dev/null differ diff --git a/vendor/babel/localedata/es_CR.dat b/vendor/babel/localedata/es_CR.dat deleted file mode 100644 index f433899f..00000000 Binary files a/vendor/babel/localedata/es_CR.dat and /dev/null differ diff --git a/vendor/babel/localedata/es_CU.dat b/vendor/babel/localedata/es_CU.dat deleted file mode 100644 index 72288202..00000000 Binary files a/vendor/babel/localedata/es_CU.dat and /dev/null differ diff --git a/vendor/babel/localedata/es_DO.dat b/vendor/babel/localedata/es_DO.dat deleted file mode 100644 index 66bebe84..00000000 Binary files a/vendor/babel/localedata/es_DO.dat and /dev/null differ diff --git a/vendor/babel/localedata/es_EA.dat b/vendor/babel/localedata/es_EA.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/es_EA.dat and /dev/null differ diff --git a/vendor/babel/localedata/es_EC.dat b/vendor/babel/localedata/es_EC.dat deleted file mode 100644 index 83d1eaf0..00000000 Binary files a/vendor/babel/localedata/es_EC.dat and /dev/null differ diff --git a/vendor/babel/localedata/es_ES.dat b/vendor/babel/localedata/es_ES.dat deleted 
file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/es_ES.dat and /dev/null differ diff --git a/vendor/babel/localedata/es_GQ.dat b/vendor/babel/localedata/es_GQ.dat deleted file mode 100644 index 13b86e08..00000000 Binary files a/vendor/babel/localedata/es_GQ.dat and /dev/null differ diff --git a/vendor/babel/localedata/es_GT.dat b/vendor/babel/localedata/es_GT.dat deleted file mode 100644 index 83f5f6a0..00000000 Binary files a/vendor/babel/localedata/es_GT.dat and /dev/null differ diff --git a/vendor/babel/localedata/es_HN.dat b/vendor/babel/localedata/es_HN.dat deleted file mode 100644 index 4aa9304d..00000000 Binary files a/vendor/babel/localedata/es_HN.dat and /dev/null differ diff --git a/vendor/babel/localedata/es_IC.dat b/vendor/babel/localedata/es_IC.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/es_IC.dat and /dev/null differ diff --git a/vendor/babel/localedata/es_MX.dat b/vendor/babel/localedata/es_MX.dat deleted file mode 100644 index ae1c2a34..00000000 Binary files a/vendor/babel/localedata/es_MX.dat and /dev/null differ diff --git a/vendor/babel/localedata/es_NI.dat b/vendor/babel/localedata/es_NI.dat deleted file mode 100644 index 7dfe1848..00000000 Binary files a/vendor/babel/localedata/es_NI.dat and /dev/null differ diff --git a/vendor/babel/localedata/es_PA.dat b/vendor/babel/localedata/es_PA.dat deleted file mode 100644 index 1222b460..00000000 Binary files a/vendor/babel/localedata/es_PA.dat and /dev/null differ diff --git a/vendor/babel/localedata/es_PE.dat b/vendor/babel/localedata/es_PE.dat deleted file mode 100644 index 4da74281..00000000 Binary files a/vendor/babel/localedata/es_PE.dat and /dev/null differ diff --git a/vendor/babel/localedata/es_PH.dat b/vendor/babel/localedata/es_PH.dat deleted file mode 100644 index f4663c6e..00000000 Binary files a/vendor/babel/localedata/es_PH.dat and /dev/null differ diff --git a/vendor/babel/localedata/es_PR.dat b/vendor/babel/localedata/es_PR.dat deleted file mode 100644 index 8db3f313..00000000 Binary files a/vendor/babel/localedata/es_PR.dat and /dev/null differ diff --git a/vendor/babel/localedata/es_PY.dat b/vendor/babel/localedata/es_PY.dat deleted file mode 100644 index 0a022b42..00000000 Binary files a/vendor/babel/localedata/es_PY.dat and /dev/null differ diff --git a/vendor/babel/localedata/es_SV.dat b/vendor/babel/localedata/es_SV.dat deleted file mode 100644 index 81c89254..00000000 --- a/vendor/babel/localedata/es_SV.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U first_dayqKsU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}q Ucurrency_namesq!}q"U unit_patternsq#}q$u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/es_US.dat b/vendor/babel/localedata/es_US.dat deleted file mode 100644 index e427ff3c..00000000 Binary files a/vendor/babel/localedata/es_US.dat and /dev/null differ diff --git a/vendor/babel/localedata/es_UY.dat b/vendor/babel/localedata/es_UY.dat deleted file mode 100644 index bfd9e033..00000000 Binary files a/vendor/babel/localedata/es_UY.dat and /dev/null differ diff --git a/vendor/babel/localedata/es_VE.dat b/vendor/babel/localedata/es_VE.dat deleted file mode 100644 index 65b44e1b..00000000 Binary files a/vendor/babel/localedata/es_VE.dat and /dev/null differ diff --git a/vendor/babel/localedata/et.dat b/vendor/babel/localedata/et.dat deleted file mode 100644 index a9d8bb9e..00000000 Binary files a/vendor/babel/localedata/et.dat and /dev/null differ diff --git a/vendor/babel/localedata/et_EE.dat b/vendor/babel/localedata/et_EE.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/et_EE.dat and /dev/null differ diff --git a/vendor/babel/localedata/eu.dat b/vendor/babel/localedata/eu.dat deleted file mode 100644 index 6131e096..00000000 Binary files a/vendor/babel/localedata/eu.dat and /dev/null differ diff --git a/vendor/babel/localedata/eu_ES.dat b/vendor/babel/localedata/eu_ES.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/eu_ES.dat and /dev/null differ diff --git a/vendor/babel/localedata/ewo.dat b/vendor/babel/localedata/ewo.dat deleted file mode 100644 index 24172c24..00000000 Binary files a/vendor/babel/localedata/ewo.dat and /dev/null differ diff --git a/vendor/babel/localedata/ewo_CM.dat b/vendor/babel/localedata/ewo_CM.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/ewo_CM.dat and /dev/null differ diff --git a/vendor/babel/localedata/fa.dat b/vendor/babel/localedata/fa.dat deleted file mode 100644 index e5d7d4a2..00000000 Binary files a/vendor/babel/localedata/fa.dat and /dev/null differ diff --git a/vendor/babel/localedata/fa_AF.dat b/vendor/babel/localedata/fa_AF.dat deleted file mode 100644 index b7592aa2..00000000 Binary files a/vendor/babel/localedata/fa_AF.dat and /dev/null differ diff --git a/vendor/babel/localedata/fa_IR.dat b/vendor/babel/localedata/fa_IR.dat deleted file mode 100644 index 150c7e3b..00000000 --- a/vendor/babel/localedata/fa_IR.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (U weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}q Uvariantsq!}q"Ucurrency_namesq#}q$U unit_patternsq%}q&u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/ff.dat b/vendor/babel/localedata/ff.dat deleted file mode 100644 index 5b3aab1c..00000000 Binary files a/vendor/babel/localedata/ff.dat and /dev/null differ diff --git a/vendor/babel/localedata/ff_SN.dat b/vendor/babel/localedata/ff_SN.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/ff_SN.dat and /dev/null differ diff --git a/vendor/babel/localedata/fi.dat b/vendor/babel/localedata/fi.dat deleted file mode 100644 index cd638377..00000000 Binary files a/vendor/babel/localedata/fi.dat and /dev/null differ diff --git a/vendor/babel/localedata/fi_FI.dat b/vendor/babel/localedata/fi_FI.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/fi_FI.dat and /dev/null differ diff --git a/vendor/babel/localedata/fil.dat b/vendor/babel/localedata/fil.dat deleted file mode 100644 index 4e790acd..00000000 Binary files a/vendor/babel/localedata/fil.dat and /dev/null differ diff --git a/vendor/babel/localedata/fil_PH.dat b/vendor/babel/localedata/fil_PH.dat deleted file mode 100644 index 81c89254..00000000 --- a/vendor/babel/localedata/fil_PH.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U first_dayqKsU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}q Ucurrency_namesq!}q"U unit_patternsq#}q$u. \ No newline at end of file diff --git a/vendor/babel/localedata/fo.dat b/vendor/babel/localedata/fo.dat deleted file mode 100644 index 53dad881..00000000 Binary files a/vendor/babel/localedata/fo.dat and /dev/null differ diff --git a/vendor/babel/localedata/fo_FO.dat b/vendor/babel/localedata/fo_FO.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/fo_FO.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr.dat b/vendor/babel/localedata/fr.dat deleted file mode 100644 index 12624b77..00000000 Binary files a/vendor/babel/localedata/fr.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_BE.dat b/vendor/babel/localedata/fr_BE.dat deleted file mode 100644 index 873a78c0..00000000 Binary files a/vendor/babel/localedata/fr_BE.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_BF.dat b/vendor/babel/localedata/fr_BF.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/fr_BF.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_BI.dat b/vendor/babel/localedata/fr_BI.dat deleted file mode 100644 index 0e37e699..00000000 Binary files a/vendor/babel/localedata/fr_BI.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_BJ.dat b/vendor/babel/localedata/fr_BJ.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/fr_BJ.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_BL.dat b/vendor/babel/localedata/fr_BL.dat deleted file mode 100644 index a7cee6f2..00000000 --- a/vendor/babel/localedata/fr_BL.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}qUcurrency_namesq }q!U unit_patternsq"}q#u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/fr_CA.dat b/vendor/babel/localedata/fr_CA.dat deleted file mode 100644 index 22e00e6a..00000000 Binary files a/vendor/babel/localedata/fr_CA.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_CD.dat b/vendor/babel/localedata/fr_CD.dat deleted file mode 100644 index 54d87d99..00000000 Binary files a/vendor/babel/localedata/fr_CD.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_CF.dat b/vendor/babel/localedata/fr_CF.dat deleted file mode 100644 index a7cee6f2..00000000 --- a/vendor/babel/localedata/fr_CF.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}qUcurrency_namesq }q!U unit_patternsq"}q#u. \ No newline at end of file diff --git a/vendor/babel/localedata/fr_CG.dat b/vendor/babel/localedata/fr_CG.dat deleted file mode 100644 index a7cee6f2..00000000 --- a/vendor/babel/localedata/fr_CG.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}qUcurrency_namesq }q!U unit_patternsq"}q#u. \ No newline at end of file diff --git a/vendor/babel/localedata/fr_CH.dat b/vendor/babel/localedata/fr_CH.dat deleted file mode 100644 index da2a9705..00000000 Binary files a/vendor/babel/localedata/fr_CH.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_CI.dat b/vendor/babel/localedata/fr_CI.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/fr_CI.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_CM.dat b/vendor/babel/localedata/fr_CM.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/fr_CM.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_DJ.dat b/vendor/babel/localedata/fr_DJ.dat deleted file mode 100644 index b0ee4421..00000000 Binary files a/vendor/babel/localedata/fr_DJ.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_DZ.dat b/vendor/babel/localedata/fr_DZ.dat deleted file mode 100644 index be317e2b..00000000 Binary files a/vendor/babel/localedata/fr_DZ.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_FR.dat b/vendor/babel/localedata/fr_FR.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/fr_FR.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_GA.dat b/vendor/babel/localedata/fr_GA.dat deleted file mode 100644 index a7cee6f2..00000000 --- a/vendor/babel/localedata/fr_GA.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}qUcurrency_namesq }q!U unit_patternsq"}q#u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/fr_GF.dat b/vendor/babel/localedata/fr_GF.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/fr_GF.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_GN.dat b/vendor/babel/localedata/fr_GN.dat deleted file mode 100644 index 4207c303..00000000 Binary files a/vendor/babel/localedata/fr_GN.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_GP.dat b/vendor/babel/localedata/fr_GP.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/fr_GP.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_GQ.dat b/vendor/babel/localedata/fr_GQ.dat deleted file mode 100644 index a7cee6f2..00000000 --- a/vendor/babel/localedata/fr_GQ.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}qUcurrency_namesq }q!U unit_patternsq"}q#u. \ No newline at end of file diff --git a/vendor/babel/localedata/fr_HT.dat b/vendor/babel/localedata/fr_HT.dat deleted file mode 100644 index b57f296d..00000000 Binary files a/vendor/babel/localedata/fr_HT.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_KM.dat b/vendor/babel/localedata/fr_KM.dat deleted file mode 100644 index 6a215c9a..00000000 Binary files a/vendor/babel/localedata/fr_KM.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_LU.dat b/vendor/babel/localedata/fr_LU.dat deleted file mode 100644 index 947f11cb..00000000 Binary files a/vendor/babel/localedata/fr_LU.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_MA.dat b/vendor/babel/localedata/fr_MA.dat deleted file mode 100644 index 01c29af9..00000000 --- a/vendor/babel/localedata/fr_MA.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. \ No newline at end of file diff --git a/vendor/babel/localedata/fr_MC.dat b/vendor/babel/localedata/fr_MC.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/fr_MC.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_MF.dat b/vendor/babel/localedata/fr_MF.dat deleted file mode 100644 index a7cee6f2..00000000 --- a/vendor/babel/localedata/fr_MF.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}qUcurrency_namesq }q!U unit_patternsq"}q#u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/fr_MG.dat b/vendor/babel/localedata/fr_MG.dat deleted file mode 100644 index 507d9879..00000000 Binary files a/vendor/babel/localedata/fr_MG.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_ML.dat b/vendor/babel/localedata/fr_ML.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/fr_ML.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_MQ.dat b/vendor/babel/localedata/fr_MQ.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/fr_MQ.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_MR.dat b/vendor/babel/localedata/fr_MR.dat deleted file mode 100644 index e37ceddb..00000000 Binary files a/vendor/babel/localedata/fr_MR.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_MU.dat b/vendor/babel/localedata/fr_MU.dat deleted file mode 100644 index 1dcad22d..00000000 Binary files a/vendor/babel/localedata/fr_MU.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_NC.dat b/vendor/babel/localedata/fr_NC.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/fr_NC.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_NE.dat b/vendor/babel/localedata/fr_NE.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/fr_NE.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_PF.dat b/vendor/babel/localedata/fr_PF.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/fr_PF.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_RE.dat b/vendor/babel/localedata/fr_RE.dat deleted file mode 100644 index 9721f8c0..00000000 Binary files a/vendor/babel/localedata/fr_RE.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_RW.dat b/vendor/babel/localedata/fr_RW.dat deleted file mode 100644 index a6a51af8..00000000 Binary files a/vendor/babel/localedata/fr_RW.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_SC.dat b/vendor/babel/localedata/fr_SC.dat deleted file mode 100644 index 017b99b9..00000000 Binary files a/vendor/babel/localedata/fr_SC.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_SN.dat b/vendor/babel/localedata/fr_SN.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/fr_SN.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_SY.dat b/vendor/babel/localedata/fr_SY.dat deleted file mode 100644 index d2dd10e5..00000000 Binary files a/vendor/babel/localedata/fr_SY.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_TD.dat b/vendor/babel/localedata/fr_TD.dat deleted file mode 100644 index a7cee6f2..00000000 --- a/vendor/babel/localedata/fr_TD.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}qUcurrency_namesq }q!U unit_patternsq"}q#u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/fr_TG.dat b/vendor/babel/localedata/fr_TG.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/fr_TG.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_TN.dat b/vendor/babel/localedata/fr_TN.dat deleted file mode 100644 index 70cc0661..00000000 Binary files a/vendor/babel/localedata/fr_TN.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_VU.dat b/vendor/babel/localedata/fr_VU.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/fr_VU.dat and /dev/null differ diff --git a/vendor/babel/localedata/fr_YT.dat b/vendor/babel/localedata/fr_YT.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/fr_YT.dat and /dev/null differ diff --git a/vendor/babel/localedata/fur.dat b/vendor/babel/localedata/fur.dat deleted file mode 100644 index 92767739..00000000 Binary files a/vendor/babel/localedata/fur.dat and /dev/null differ diff --git a/vendor/babel/localedata/fur_IT.dat b/vendor/babel/localedata/fur_IT.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/fur_IT.dat and /dev/null differ diff --git a/vendor/babel/localedata/ga.dat b/vendor/babel/localedata/ga.dat deleted file mode 100644 index e47984a3..00000000 Binary files a/vendor/babel/localedata/ga.dat and /dev/null differ diff --git a/vendor/babel/localedata/ga_IE.dat b/vendor/babel/localedata/ga_IE.dat deleted file mode 100644 index 401708ff..00000000 --- a/vendor/babel/localedata/ga_IE.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u. \ No newline at end of file diff --git a/vendor/babel/localedata/gd.dat b/vendor/babel/localedata/gd.dat deleted file mode 100644 index d8d4a035..00000000 Binary files a/vendor/babel/localedata/gd.dat and /dev/null differ diff --git a/vendor/babel/localedata/gd_GB.dat b/vendor/babel/localedata/gd_GB.dat deleted file mode 100644 index 401708ff..00000000 --- a/vendor/babel/localedata/gd_GB.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/gl.dat b/vendor/babel/localedata/gl.dat deleted file mode 100644 index c9fcaabf..00000000 Binary files a/vendor/babel/localedata/gl.dat and /dev/null differ diff --git a/vendor/babel/localedata/gl_ES.dat b/vendor/babel/localedata/gl_ES.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/gl_ES.dat and /dev/null differ diff --git a/vendor/babel/localedata/gsw.dat b/vendor/babel/localedata/gsw.dat deleted file mode 100644 index 06178116..00000000 Binary files a/vendor/babel/localedata/gsw.dat and /dev/null differ diff --git a/vendor/babel/localedata/gsw_CH.dat b/vendor/babel/localedata/gsw_CH.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/gsw_CH.dat and /dev/null differ diff --git a/vendor/babel/localedata/gu.dat b/vendor/babel/localedata/gu.dat deleted file mode 100644 index 19621ca2..00000000 Binary files a/vendor/babel/localedata/gu.dat and /dev/null differ diff --git a/vendor/babel/localedata/gu_IN.dat b/vendor/babel/localedata/gu_IN.dat deleted file mode 100644 index 1564619c..00000000 --- a/vendor/babel/localedata/gu_IN.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (U weekend_startqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u. \ No newline at end of file diff --git a/vendor/babel/localedata/guz.dat b/vendor/babel/localedata/guz.dat deleted file mode 100644 index d287c50c..00000000 Binary files a/vendor/babel/localedata/guz.dat and /dev/null differ diff --git a/vendor/babel/localedata/guz_KE.dat b/vendor/babel/localedata/guz_KE.dat deleted file mode 100644 index 35690913..00000000 --- a/vendor/babel/localedata/guz_KE.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. \ No newline at end of file diff --git a/vendor/babel/localedata/gv.dat b/vendor/babel/localedata/gv.dat deleted file mode 100644 index 4d923589..00000000 Binary files a/vendor/babel/localedata/gv.dat and /dev/null differ diff --git a/vendor/babel/localedata/gv_GB.dat b/vendor/babel/localedata/gv_GB.dat deleted file mode 100644 index 401708ff..00000000 --- a/vendor/babel/localedata/gv_GB.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/ha.dat b/vendor/babel/localedata/ha.dat deleted file mode 100644 index d42e1f23..00000000 Binary files a/vendor/babel/localedata/ha.dat and /dev/null differ diff --git a/vendor/babel/localedata/ha_Latn.dat b/vendor/babel/localedata/ha_Latn.dat deleted file mode 100644 index 27760a1c..00000000 Binary files a/vendor/babel/localedata/ha_Latn.dat and /dev/null differ diff --git a/vendor/babel/localedata/ha_Latn_GH.dat b/vendor/babel/localedata/ha_Latn_GH.dat deleted file mode 100644 index a6c4ac10..00000000 Binary files a/vendor/babel/localedata/ha_Latn_GH.dat and /dev/null differ diff --git a/vendor/babel/localedata/ha_Latn_NE.dat b/vendor/babel/localedata/ha_Latn_NE.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/ha_Latn_NE.dat and /dev/null differ diff --git a/vendor/babel/localedata/ha_Latn_NG.dat b/vendor/babel/localedata/ha_Latn_NG.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/ha_Latn_NG.dat and /dev/null differ diff --git a/vendor/babel/localedata/haw.dat b/vendor/babel/localedata/haw.dat deleted file mode 100644 index 74c46ef8..00000000 Binary files a/vendor/babel/localedata/haw.dat and /dev/null differ diff --git a/vendor/babel/localedata/haw_US.dat b/vendor/babel/localedata/haw_US.dat deleted file mode 100644 index e1639abe..00000000 --- a/vendor/babel/localedata/haw_US.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u. \ No newline at end of file diff --git a/vendor/babel/localedata/he.dat b/vendor/babel/localedata/he.dat deleted file mode 100644 index f085ae17..00000000 Binary files a/vendor/babel/localedata/he.dat and /dev/null differ diff --git a/vendor/babel/localedata/he_IL.dat b/vendor/babel/localedata/he_IL.dat deleted file mode 100644 index 36bd112d..00000000 --- a/vendor/babel/localedata/he_IL.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (U weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}q Uvariantsq!}q"Ucurrency_namesq#}q$U unit_patternsq%}q&u. \ No newline at end of file diff --git a/vendor/babel/localedata/hi.dat b/vendor/babel/localedata/hi.dat deleted file mode 100644 index 26f01c7e..00000000 Binary files a/vendor/babel/localedata/hi.dat and /dev/null differ diff --git a/vendor/babel/localedata/hi_IN.dat b/vendor/babel/localedata/hi_IN.dat deleted file mode 100644 index 1564619c..00000000 --- a/vendor/babel/localedata/hi_IN.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (U weekend_startqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/hr.dat b/vendor/babel/localedata/hr.dat deleted file mode 100644 index c11d3951..00000000 Binary files a/vendor/babel/localedata/hr.dat and /dev/null differ diff --git a/vendor/babel/localedata/hr_BA.dat b/vendor/babel/localedata/hr_BA.dat deleted file mode 100644 index 7b236885..00000000 Binary files a/vendor/babel/localedata/hr_BA.dat and /dev/null differ diff --git a/vendor/babel/localedata/hr_HR.dat b/vendor/babel/localedata/hr_HR.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/hr_HR.dat and /dev/null differ diff --git a/vendor/babel/localedata/hu.dat b/vendor/babel/localedata/hu.dat deleted file mode 100644 index ddc9aae1..00000000 Binary files a/vendor/babel/localedata/hu.dat and /dev/null differ diff --git a/vendor/babel/localedata/hu_HU.dat b/vendor/babel/localedata/hu_HU.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/hu_HU.dat and /dev/null differ diff --git a/vendor/babel/localedata/hy.dat b/vendor/babel/localedata/hy.dat deleted file mode 100644 index 59fc3928..00000000 Binary files a/vendor/babel/localedata/hy.dat and /dev/null differ diff --git a/vendor/babel/localedata/hy_AM.dat b/vendor/babel/localedata/hy_AM.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/hy_AM.dat and /dev/null differ diff --git a/vendor/babel/localedata/ia.dat b/vendor/babel/localedata/ia.dat deleted file mode 100644 index d97e3257..00000000 Binary files a/vendor/babel/localedata/ia.dat and /dev/null differ diff --git a/vendor/babel/localedata/ia_FR.dat b/vendor/babel/localedata/ia_FR.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/ia_FR.dat and /dev/null differ diff --git a/vendor/babel/localedata/id.dat b/vendor/babel/localedata/id.dat deleted file mode 100644 index 1b8398a6..00000000 Binary files a/vendor/babel/localedata/id.dat and /dev/null differ diff --git a/vendor/babel/localedata/id_ID.dat b/vendor/babel/localedata/id_ID.dat deleted file mode 100644 index 81c89254..00000000 --- a/vendor/babel/localedata/id_ID.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U first_dayqKsU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}q Ucurrency_namesq!}q"U unit_patternsq#}q$u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/ig.dat b/vendor/babel/localedata/ig.dat deleted file mode 100644 index c19bb598..00000000 Binary files a/vendor/babel/localedata/ig.dat and /dev/null differ diff --git a/vendor/babel/localedata/ig_NG.dat b/vendor/babel/localedata/ig_NG.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/ig_NG.dat and /dev/null differ diff --git a/vendor/babel/localedata/ii.dat b/vendor/babel/localedata/ii.dat deleted file mode 100644 index 1b6499e9..00000000 Binary files a/vendor/babel/localedata/ii.dat and /dev/null differ diff --git a/vendor/babel/localedata/ii_CN.dat b/vendor/babel/localedata/ii_CN.dat deleted file mode 100644 index 81c89254..00000000 --- a/vendor/babel/localedata/ii_CN.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U first_dayqKsU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}q Ucurrency_namesq!}q"U unit_patternsq#}q$u. \ No newline at end of file diff --git a/vendor/babel/localedata/is.dat b/vendor/babel/localedata/is.dat deleted file mode 100644 index f1996ea6..00000000 Binary files a/vendor/babel/localedata/is.dat and /dev/null differ diff --git a/vendor/babel/localedata/is_IS.dat b/vendor/babel/localedata/is_IS.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/is_IS.dat and /dev/null differ diff --git a/vendor/babel/localedata/it.dat b/vendor/babel/localedata/it.dat deleted file mode 100644 index b084d771..00000000 Binary files a/vendor/babel/localedata/it.dat and /dev/null differ diff --git a/vendor/babel/localedata/it_CH.dat b/vendor/babel/localedata/it_CH.dat deleted file mode 100644 index d37370cc..00000000 Binary files a/vendor/babel/localedata/it_CH.dat and /dev/null differ diff --git a/vendor/babel/localedata/it_IT.dat b/vendor/babel/localedata/it_IT.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/it_IT.dat and /dev/null differ diff --git a/vendor/babel/localedata/it_SM.dat b/vendor/babel/localedata/it_SM.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/it_SM.dat and /dev/null differ diff --git a/vendor/babel/localedata/ja.dat b/vendor/babel/localedata/ja.dat deleted file mode 100644 index 7351a747..00000000 Binary files a/vendor/babel/localedata/ja.dat and /dev/null differ diff --git a/vendor/babel/localedata/ja_JP.dat b/vendor/babel/localedata/ja_JP.dat deleted file mode 100644 index 81c89254..00000000 --- a/vendor/babel/localedata/ja_JP.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U first_dayqKsU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}q Ucurrency_namesq!}q"U unit_patternsq#}q$u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/jgo.dat b/vendor/babel/localedata/jgo.dat deleted file mode 100644 index 616271b5..00000000 Binary files a/vendor/babel/localedata/jgo.dat and /dev/null differ diff --git a/vendor/babel/localedata/jgo_CM.dat b/vendor/babel/localedata/jgo_CM.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/jgo_CM.dat and /dev/null differ diff --git a/vendor/babel/localedata/jmc.dat b/vendor/babel/localedata/jmc.dat deleted file mode 100644 index ccc3b836..00000000 Binary files a/vendor/babel/localedata/jmc.dat and /dev/null differ diff --git a/vendor/babel/localedata/jmc_TZ.dat b/vendor/babel/localedata/jmc_TZ.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/jmc_TZ.dat and /dev/null differ diff --git a/vendor/babel/localedata/ka.dat b/vendor/babel/localedata/ka.dat deleted file mode 100644 index 1979e8aa..00000000 Binary files a/vendor/babel/localedata/ka.dat and /dev/null differ diff --git a/vendor/babel/localedata/ka_GE.dat b/vendor/babel/localedata/ka_GE.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/ka_GE.dat and /dev/null differ diff --git a/vendor/babel/localedata/kab.dat b/vendor/babel/localedata/kab.dat deleted file mode 100644 index 409ba3b0..00000000 Binary files a/vendor/babel/localedata/kab.dat and /dev/null differ diff --git a/vendor/babel/localedata/kab_DZ.dat b/vendor/babel/localedata/kab_DZ.dat deleted file mode 100644 index 01c29af9..00000000 --- a/vendor/babel/localedata/kab_DZ.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. \ No newline at end of file diff --git a/vendor/babel/localedata/kam.dat b/vendor/babel/localedata/kam.dat deleted file mode 100644 index 70c78d05..00000000 Binary files a/vendor/babel/localedata/kam.dat and /dev/null differ diff --git a/vendor/babel/localedata/kam_KE.dat b/vendor/babel/localedata/kam_KE.dat deleted file mode 100644 index 35690913..00000000 --- a/vendor/babel/localedata/kam_KE.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/kde.dat b/vendor/babel/localedata/kde.dat deleted file mode 100644 index 3468c068..00000000 Binary files a/vendor/babel/localedata/kde.dat and /dev/null differ diff --git a/vendor/babel/localedata/kde_TZ.dat b/vendor/babel/localedata/kde_TZ.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/kde_TZ.dat and /dev/null differ diff --git a/vendor/babel/localedata/kea.dat b/vendor/babel/localedata/kea.dat deleted file mode 100644 index 36e669e1..00000000 Binary files a/vendor/babel/localedata/kea.dat and /dev/null differ diff --git a/vendor/babel/localedata/kea_CV.dat b/vendor/babel/localedata/kea_CV.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/kea_CV.dat and /dev/null differ diff --git a/vendor/babel/localedata/khq.dat b/vendor/babel/localedata/khq.dat deleted file mode 100644 index 4e900a78..00000000 Binary files a/vendor/babel/localedata/khq.dat and /dev/null differ diff --git a/vendor/babel/localedata/khq_ML.dat b/vendor/babel/localedata/khq_ML.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/khq_ML.dat and /dev/null differ diff --git a/vendor/babel/localedata/ki.dat b/vendor/babel/localedata/ki.dat deleted file mode 100644 index d30ce01f..00000000 Binary files a/vendor/babel/localedata/ki.dat and /dev/null differ diff --git a/vendor/babel/localedata/ki_KE.dat b/vendor/babel/localedata/ki_KE.dat deleted file mode 100644 index 35690913..00000000 --- a/vendor/babel/localedata/ki_KE.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/kk.dat b/vendor/babel/localedata/kk.dat deleted file mode 100644 index 3125ea54..00000000 Binary files a/vendor/babel/localedata/kk.dat and /dev/null differ diff --git a/vendor/babel/localedata/kk_Cyrl.dat b/vendor/babel/localedata/kk_Cyrl.dat deleted file mode 100644 index 27760a1c..00000000 Binary files a/vendor/babel/localedata/kk_Cyrl.dat and /dev/null differ diff --git a/vendor/babel/localedata/kk_Cyrl_KZ.dat b/vendor/babel/localedata/kk_Cyrl_KZ.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/kk_Cyrl_KZ.dat and /dev/null differ diff --git a/vendor/babel/localedata/kkj.dat b/vendor/babel/localedata/kkj.dat deleted file mode 100644 index bd2d7b08..00000000 Binary files a/vendor/babel/localedata/kkj.dat and /dev/null differ diff --git a/vendor/babel/localedata/kkj_CM.dat b/vendor/babel/localedata/kkj_CM.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/kkj_CM.dat and /dev/null differ diff --git a/vendor/babel/localedata/kl.dat b/vendor/babel/localedata/kl.dat deleted file mode 100644 index 5c13a69f..00000000 Binary files a/vendor/babel/localedata/kl.dat and /dev/null differ diff --git a/vendor/babel/localedata/kl_GL.dat b/vendor/babel/localedata/kl_GL.dat deleted file mode 100644 index a7cee6f2..00000000 --- a/vendor/babel/localedata/kl_GL.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}qUcurrency_namesq }q!U unit_patternsq"}q#u. \ No newline at end of file diff --git a/vendor/babel/localedata/kln.dat b/vendor/babel/localedata/kln.dat deleted file mode 100644 index a5cc0352..00000000 Binary files a/vendor/babel/localedata/kln.dat and /dev/null differ diff --git a/vendor/babel/localedata/kln_KE.dat b/vendor/babel/localedata/kln_KE.dat deleted file mode 100644 index 35690913..00000000 --- a/vendor/babel/localedata/kln_KE.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. \ No newline at end of file diff --git a/vendor/babel/localedata/km.dat b/vendor/babel/localedata/km.dat deleted file mode 100644 index 4613359c..00000000 Binary files a/vendor/babel/localedata/km.dat and /dev/null differ diff --git a/vendor/babel/localedata/km_KH.dat b/vendor/babel/localedata/km_KH.dat deleted file mode 100644 index 81c89254..00000000 --- a/vendor/babel/localedata/km_KH.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U first_dayqKsU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}q Ucurrency_namesq!}q"U unit_patternsq#}q$u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/kn.dat b/vendor/babel/localedata/kn.dat deleted file mode 100644 index c136cc5f..00000000 Binary files a/vendor/babel/localedata/kn.dat and /dev/null differ diff --git a/vendor/babel/localedata/kn_IN.dat b/vendor/babel/localedata/kn_IN.dat deleted file mode 100644 index 1564619c..00000000 --- a/vendor/babel/localedata/kn_IN.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (U weekend_startqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u. \ No newline at end of file diff --git a/vendor/babel/localedata/ko.dat b/vendor/babel/localedata/ko.dat deleted file mode 100644 index 5910e0d6..00000000 Binary files a/vendor/babel/localedata/ko.dat and /dev/null differ diff --git a/vendor/babel/localedata/ko_KP.dat b/vendor/babel/localedata/ko_KP.dat deleted file mode 100644 index 4b4537f7..00000000 Binary files a/vendor/babel/localedata/ko_KP.dat and /dev/null differ diff --git a/vendor/babel/localedata/ko_KR.dat b/vendor/babel/localedata/ko_KR.dat deleted file mode 100644 index 81c89254..00000000 --- a/vendor/babel/localedata/ko_KR.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U first_dayqKsU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}q Ucurrency_namesq!}q"U unit_patternsq#}q$u. \ No newline at end of file diff --git a/vendor/babel/localedata/kok.dat b/vendor/babel/localedata/kok.dat deleted file mode 100644 index 012c2e20..00000000 Binary files a/vendor/babel/localedata/kok.dat and /dev/null differ diff --git a/vendor/babel/localedata/kok_IN.dat b/vendor/babel/localedata/kok_IN.dat deleted file mode 100644 index 1564619c..00000000 --- a/vendor/babel/localedata/kok_IN.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (U weekend_startqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u. \ No newline at end of file diff --git a/vendor/babel/localedata/ks.dat b/vendor/babel/localedata/ks.dat deleted file mode 100644 index bd4a4272..00000000 Binary files a/vendor/babel/localedata/ks.dat and /dev/null differ diff --git a/vendor/babel/localedata/ks_Arab.dat b/vendor/babel/localedata/ks_Arab.dat deleted file mode 100644 index 27760a1c..00000000 Binary files a/vendor/babel/localedata/ks_Arab.dat and /dev/null differ diff --git a/vendor/babel/localedata/ks_Arab_IN.dat b/vendor/babel/localedata/ks_Arab_IN.dat deleted file mode 100644 index 1564619c..00000000 --- a/vendor/babel/localedata/ks_Arab_IN.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (U weekend_startqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/ksb.dat b/vendor/babel/localedata/ksb.dat deleted file mode 100644 index 4a160056..00000000 Binary files a/vendor/babel/localedata/ksb.dat and /dev/null differ diff --git a/vendor/babel/localedata/ksb_TZ.dat b/vendor/babel/localedata/ksb_TZ.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/ksb_TZ.dat and /dev/null differ diff --git a/vendor/babel/localedata/ksf.dat b/vendor/babel/localedata/ksf.dat deleted file mode 100644 index 11881d44..00000000 Binary files a/vendor/babel/localedata/ksf.dat and /dev/null differ diff --git a/vendor/babel/localedata/ksf_CM.dat b/vendor/babel/localedata/ksf_CM.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/ksf_CM.dat and /dev/null differ diff --git a/vendor/babel/localedata/ksh.dat b/vendor/babel/localedata/ksh.dat deleted file mode 100644 index 385e52e4..00000000 Binary files a/vendor/babel/localedata/ksh.dat and /dev/null differ diff --git a/vendor/babel/localedata/ksh_DE.dat b/vendor/babel/localedata/ksh_DE.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/ksh_DE.dat and /dev/null differ diff --git a/vendor/babel/localedata/kw.dat b/vendor/babel/localedata/kw.dat deleted file mode 100644 index 4056d0ad..00000000 Binary files a/vendor/babel/localedata/kw.dat and /dev/null differ diff --git a/vendor/babel/localedata/kw_GB.dat b/vendor/babel/localedata/kw_GB.dat deleted file mode 100644 index 401708ff..00000000 --- a/vendor/babel/localedata/kw_GB.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/ky.dat b/vendor/babel/localedata/ky.dat deleted file mode 100644 index cff43ec9..00000000 Binary files a/vendor/babel/localedata/ky.dat and /dev/null differ diff --git a/vendor/babel/localedata/ky_KG.dat b/vendor/babel/localedata/ky_KG.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/ky_KG.dat and /dev/null differ diff --git a/vendor/babel/localedata/lag.dat b/vendor/babel/localedata/lag.dat deleted file mode 100644 index 922e70fb..00000000 Binary files a/vendor/babel/localedata/lag.dat and /dev/null differ diff --git a/vendor/babel/localedata/lag_TZ.dat b/vendor/babel/localedata/lag_TZ.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/lag_TZ.dat and /dev/null differ diff --git a/vendor/babel/localedata/lg.dat b/vendor/babel/localedata/lg.dat deleted file mode 100644 index dbbf1eba..00000000 Binary files a/vendor/babel/localedata/lg.dat and /dev/null differ diff --git a/vendor/babel/localedata/lg_UG.dat b/vendor/babel/localedata/lg_UG.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/lg_UG.dat and /dev/null differ diff --git a/vendor/babel/localedata/ln.dat b/vendor/babel/localedata/ln.dat deleted file mode 100644 index 320079bb..00000000 Binary files a/vendor/babel/localedata/ln.dat and /dev/null differ diff --git a/vendor/babel/localedata/ln_AO.dat b/vendor/babel/localedata/ln_AO.dat deleted file mode 100644 index dc23f4ec..00000000 Binary files a/vendor/babel/localedata/ln_AO.dat and /dev/null differ diff --git a/vendor/babel/localedata/ln_CD.dat b/vendor/babel/localedata/ln_CD.dat deleted file mode 100644 index a7cee6f2..00000000 --- a/vendor/babel/localedata/ln_CD.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}qUcurrency_namesq }q!U unit_patternsq"}q#u. \ No newline at end of file diff --git a/vendor/babel/localedata/ln_CF.dat b/vendor/babel/localedata/ln_CF.dat deleted file mode 100644 index 4c19ce3b..00000000 --- a/vendor/babel/localedata/ln_CF.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U zone_formatsq}qUcurrency_formatsq}qU_versionqML U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}qUcurrency_namesq }q!U unit_patternsq"}q#u. \ No newline at end of file diff --git a/vendor/babel/localedata/ln_CG.dat b/vendor/babel/localedata/ln_CG.dat deleted file mode 100644 index a7cee6f2..00000000 --- a/vendor/babel/localedata/ln_CG.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}qUcurrency_namesq }q!U unit_patternsq"}q#u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/lo.dat b/vendor/babel/localedata/lo.dat deleted file mode 100644 index 561bad36..00000000 Binary files a/vendor/babel/localedata/lo.dat and /dev/null differ diff --git a/vendor/babel/localedata/lo_LA.dat b/vendor/babel/localedata/lo_LA.dat deleted file mode 100644 index 81c89254..00000000 --- a/vendor/babel/localedata/lo_LA.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U first_dayqKsU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}q Ucurrency_namesq!}q"U unit_patternsq#}q$u. \ No newline at end of file diff --git a/vendor/babel/localedata/lt.dat b/vendor/babel/localedata/lt.dat deleted file mode 100644 index c05b9188..00000000 Binary files a/vendor/babel/localedata/lt.dat and /dev/null differ diff --git a/vendor/babel/localedata/lt_LT.dat b/vendor/babel/localedata/lt_LT.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/lt_LT.dat and /dev/null differ diff --git a/vendor/babel/localedata/lu.dat b/vendor/babel/localedata/lu.dat deleted file mode 100644 index 5d495573..00000000 Binary files a/vendor/babel/localedata/lu.dat and /dev/null differ diff --git a/vendor/babel/localedata/lu_CD.dat b/vendor/babel/localedata/lu_CD.dat deleted file mode 100644 index a7cee6f2..00000000 --- a/vendor/babel/localedata/lu_CD.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}qUcurrency_namesq }q!U unit_patternsq"}q#u. \ No newline at end of file diff --git a/vendor/babel/localedata/luo.dat b/vendor/babel/localedata/luo.dat deleted file mode 100644 index 326e796e..00000000 Binary files a/vendor/babel/localedata/luo.dat and /dev/null differ diff --git a/vendor/babel/localedata/luo_KE.dat b/vendor/babel/localedata/luo_KE.dat deleted file mode 100644 index 35690913..00000000 --- a/vendor/babel/localedata/luo_KE.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. \ No newline at end of file diff --git a/vendor/babel/localedata/luy.dat b/vendor/babel/localedata/luy.dat deleted file mode 100644 index b9706a1f..00000000 Binary files a/vendor/babel/localedata/luy.dat and /dev/null differ diff --git a/vendor/babel/localedata/luy_KE.dat b/vendor/babel/localedata/luy_KE.dat deleted file mode 100644 index 35690913..00000000 --- a/vendor/babel/localedata/luy_KE.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/lv.dat b/vendor/babel/localedata/lv.dat deleted file mode 100644 index a3f770d4..00000000 Binary files a/vendor/babel/localedata/lv.dat and /dev/null differ diff --git a/vendor/babel/localedata/lv_LV.dat b/vendor/babel/localedata/lv_LV.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/lv_LV.dat and /dev/null differ diff --git a/vendor/babel/localedata/mas.dat b/vendor/babel/localedata/mas.dat deleted file mode 100644 index 6f17d0de..00000000 Binary files a/vendor/babel/localedata/mas.dat and /dev/null differ diff --git a/vendor/babel/localedata/mas_KE.dat b/vendor/babel/localedata/mas_KE.dat deleted file mode 100644 index 35690913..00000000 --- a/vendor/babel/localedata/mas_KE.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. \ No newline at end of file diff --git a/vendor/babel/localedata/mas_TZ.dat b/vendor/babel/localedata/mas_TZ.dat deleted file mode 100644 index 5e753954..00000000 Binary files a/vendor/babel/localedata/mas_TZ.dat and /dev/null differ diff --git a/vendor/babel/localedata/mer.dat b/vendor/babel/localedata/mer.dat deleted file mode 100644 index 265eac28..00000000 Binary files a/vendor/babel/localedata/mer.dat and /dev/null differ diff --git a/vendor/babel/localedata/mer_KE.dat b/vendor/babel/localedata/mer_KE.dat deleted file mode 100644 index 35690913..00000000 --- a/vendor/babel/localedata/mer_KE.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/mfe.dat b/vendor/babel/localedata/mfe.dat deleted file mode 100644 index e146820f..00000000 Binary files a/vendor/babel/localedata/mfe.dat and /dev/null differ diff --git a/vendor/babel/localedata/mfe_MU.dat b/vendor/babel/localedata/mfe_MU.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/mfe_MU.dat and /dev/null differ diff --git a/vendor/babel/localedata/mg.dat b/vendor/babel/localedata/mg.dat deleted file mode 100644 index bbab19f0..00000000 Binary files a/vendor/babel/localedata/mg.dat and /dev/null differ diff --git a/vendor/babel/localedata/mg_MG.dat b/vendor/babel/localedata/mg_MG.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/mg_MG.dat and /dev/null differ diff --git a/vendor/babel/localedata/mgh.dat b/vendor/babel/localedata/mgh.dat deleted file mode 100644 index 12987fc6..00000000 Binary files a/vendor/babel/localedata/mgh.dat and /dev/null differ diff --git a/vendor/babel/localedata/mgh_MZ.dat b/vendor/babel/localedata/mgh_MZ.dat deleted file mode 100644 index 35690913..00000000 --- a/vendor/babel/localedata/mgh_MZ.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. \ No newline at end of file diff --git a/vendor/babel/localedata/mgo.dat b/vendor/babel/localedata/mgo.dat deleted file mode 100644 index c90ac45f..00000000 Binary files a/vendor/babel/localedata/mgo.dat and /dev/null differ diff --git a/vendor/babel/localedata/mgo_CM.dat b/vendor/babel/localedata/mgo_CM.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/mgo_CM.dat and /dev/null differ diff --git a/vendor/babel/localedata/mk.dat b/vendor/babel/localedata/mk.dat deleted file mode 100644 index 098b2262..00000000 Binary files a/vendor/babel/localedata/mk.dat and /dev/null differ diff --git a/vendor/babel/localedata/mk_MK.dat b/vendor/babel/localedata/mk_MK.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/mk_MK.dat and /dev/null differ diff --git a/vendor/babel/localedata/ml.dat b/vendor/babel/localedata/ml.dat deleted file mode 100644 index df293ad9..00000000 Binary files a/vendor/babel/localedata/ml.dat and /dev/null differ diff --git a/vendor/babel/localedata/ml_IN.dat b/vendor/babel/localedata/ml_IN.dat deleted file mode 100644 index 1564619c..00000000 --- a/vendor/babel/localedata/ml_IN.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (U weekend_startqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/mn.dat b/vendor/babel/localedata/mn.dat deleted file mode 100644 index 8b3efac6..00000000 Binary files a/vendor/babel/localedata/mn.dat and /dev/null differ diff --git a/vendor/babel/localedata/mn_Cyrl.dat b/vendor/babel/localedata/mn_Cyrl.dat deleted file mode 100644 index 27760a1c..00000000 Binary files a/vendor/babel/localedata/mn_Cyrl.dat and /dev/null differ diff --git a/vendor/babel/localedata/mn_Cyrl_MN.dat b/vendor/babel/localedata/mn_Cyrl_MN.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/mn_Cyrl_MN.dat and /dev/null differ diff --git a/vendor/babel/localedata/mr.dat b/vendor/babel/localedata/mr.dat deleted file mode 100644 index 29dde4e7..00000000 Binary files a/vendor/babel/localedata/mr.dat and /dev/null differ diff --git a/vendor/babel/localedata/mr_IN.dat b/vendor/babel/localedata/mr_IN.dat deleted file mode 100644 index 1564619c..00000000 --- a/vendor/babel/localedata/mr_IN.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (U weekend_startqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u. \ No newline at end of file diff --git a/vendor/babel/localedata/ms.dat b/vendor/babel/localedata/ms.dat deleted file mode 100644 index a6974ea4..00000000 Binary files a/vendor/babel/localedata/ms.dat and /dev/null differ diff --git a/vendor/babel/localedata/ms_Latn.dat b/vendor/babel/localedata/ms_Latn.dat deleted file mode 100644 index 1ba8202e..00000000 Binary files a/vendor/babel/localedata/ms_Latn.dat and /dev/null differ diff --git a/vendor/babel/localedata/ms_Latn_BN.dat b/vendor/babel/localedata/ms_Latn_BN.dat deleted file mode 100644 index d76b0c96..00000000 Binary files a/vendor/babel/localedata/ms_Latn_BN.dat and /dev/null differ diff --git a/vendor/babel/localedata/ms_Latn_MY.dat b/vendor/babel/localedata/ms_Latn_MY.dat deleted file mode 100644 index 50808663..00000000 Binary files a/vendor/babel/localedata/ms_Latn_MY.dat and /dev/null differ diff --git a/vendor/babel/localedata/ms_Latn_SG.dat b/vendor/babel/localedata/ms_Latn_SG.dat deleted file mode 100644 index 5120c305..00000000 Binary files a/vendor/babel/localedata/ms_Latn_SG.dat and /dev/null differ diff --git a/vendor/babel/localedata/mt.dat b/vendor/babel/localedata/mt.dat deleted file mode 100644 index cbf2feb1..00000000 Binary files a/vendor/babel/localedata/mt.dat and /dev/null differ diff --git a/vendor/babel/localedata/mt_MT.dat b/vendor/babel/localedata/mt_MT.dat deleted file mode 100644 index 81c89254..00000000 --- a/vendor/babel/localedata/mt_MT.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U first_dayqKsU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}q Ucurrency_namesq!}q"U unit_patternsq#}q$u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/mua.dat b/vendor/babel/localedata/mua.dat deleted file mode 100644 index dd4881c8..00000000 Binary files a/vendor/babel/localedata/mua.dat and /dev/null differ diff --git a/vendor/babel/localedata/mua_CM.dat b/vendor/babel/localedata/mua_CM.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/mua_CM.dat and /dev/null differ diff --git a/vendor/babel/localedata/my.dat b/vendor/babel/localedata/my.dat deleted file mode 100644 index 6482e5ea..00000000 Binary files a/vendor/babel/localedata/my.dat and /dev/null differ diff --git a/vendor/babel/localedata/my_MM.dat b/vendor/babel/localedata/my_MM.dat deleted file mode 100644 index 81c89254..00000000 --- a/vendor/babel/localedata/my_MM.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U first_dayqKsU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}q Ucurrency_namesq!}q"U unit_patternsq#}q$u. \ No newline at end of file diff --git a/vendor/babel/localedata/naq.dat b/vendor/babel/localedata/naq.dat deleted file mode 100644 index 87fc01f8..00000000 Binary files a/vendor/babel/localedata/naq.dat and /dev/null differ diff --git a/vendor/babel/localedata/naq_NA.dat b/vendor/babel/localedata/naq_NA.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/naq_NA.dat and /dev/null differ diff --git a/vendor/babel/localedata/nb.dat b/vendor/babel/localedata/nb.dat deleted file mode 100644 index aa34863b..00000000 Binary files a/vendor/babel/localedata/nb.dat and /dev/null differ diff --git a/vendor/babel/localedata/nb_NO.dat b/vendor/babel/localedata/nb_NO.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/nb_NO.dat and /dev/null differ diff --git a/vendor/babel/localedata/nd.dat b/vendor/babel/localedata/nd.dat deleted file mode 100644 index 231008ec..00000000 Binary files a/vendor/babel/localedata/nd.dat and /dev/null differ diff --git a/vendor/babel/localedata/nd_ZW.dat b/vendor/babel/localedata/nd_ZW.dat deleted file mode 100644 index 35690913..00000000 --- a/vendor/babel/localedata/nd_ZW.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/ne.dat b/vendor/babel/localedata/ne.dat deleted file mode 100644 index d078fcf4..00000000 Binary files a/vendor/babel/localedata/ne.dat and /dev/null differ diff --git a/vendor/babel/localedata/ne_IN.dat b/vendor/babel/localedata/ne_IN.dat deleted file mode 100644 index 8044567a..00000000 Binary files a/vendor/babel/localedata/ne_IN.dat and /dev/null differ diff --git a/vendor/babel/localedata/ne_NP.dat b/vendor/babel/localedata/ne_NP.dat deleted file mode 100644 index 81c89254..00000000 --- a/vendor/babel/localedata/ne_NP.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U first_dayqKsU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}q Ucurrency_namesq!}q"U unit_patternsq#}q$u. \ No newline at end of file diff --git a/vendor/babel/localedata/nl.dat b/vendor/babel/localedata/nl.dat deleted file mode 100644 index ace2ad64..00000000 Binary files a/vendor/babel/localedata/nl.dat and /dev/null differ diff --git a/vendor/babel/localedata/nl_AW.dat b/vendor/babel/localedata/nl_AW.dat deleted file mode 100644 index 6f68c997..00000000 Binary files a/vendor/babel/localedata/nl_AW.dat and /dev/null differ diff --git a/vendor/babel/localedata/nl_BE.dat b/vendor/babel/localedata/nl_BE.dat deleted file mode 100644 index c722c78c..00000000 Binary files a/vendor/babel/localedata/nl_BE.dat and /dev/null differ diff --git a/vendor/babel/localedata/nl_CW.dat b/vendor/babel/localedata/nl_CW.dat deleted file mode 100644 index 11b34d92..00000000 Binary files a/vendor/babel/localedata/nl_CW.dat and /dev/null differ diff --git a/vendor/babel/localedata/nl_NL.dat b/vendor/babel/localedata/nl_NL.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/nl_NL.dat and /dev/null differ diff --git a/vendor/babel/localedata/nl_SR.dat b/vendor/babel/localedata/nl_SR.dat deleted file mode 100644 index 91804fbf..00000000 Binary files a/vendor/babel/localedata/nl_SR.dat and /dev/null differ diff --git a/vendor/babel/localedata/nl_SX.dat b/vendor/babel/localedata/nl_SX.dat deleted file mode 100644 index 11b34d92..00000000 Binary files a/vendor/babel/localedata/nl_SX.dat and /dev/null differ diff --git a/vendor/babel/localedata/nmg.dat b/vendor/babel/localedata/nmg.dat deleted file mode 100644 index 41987a1e..00000000 Binary files a/vendor/babel/localedata/nmg.dat and /dev/null differ diff --git a/vendor/babel/localedata/nmg_CM.dat b/vendor/babel/localedata/nmg_CM.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/nmg_CM.dat and /dev/null differ diff --git a/vendor/babel/localedata/nn.dat b/vendor/babel/localedata/nn.dat deleted file mode 100644 index c810a884..00000000 Binary files a/vendor/babel/localedata/nn.dat and /dev/null differ diff --git a/vendor/babel/localedata/nn_NO.dat b/vendor/babel/localedata/nn_NO.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/nn_NO.dat and /dev/null differ diff --git a/vendor/babel/localedata/nnh.dat b/vendor/babel/localedata/nnh.dat deleted file mode 100644 index 28dbf242..00000000 Binary files a/vendor/babel/localedata/nnh.dat and /dev/null differ diff --git a/vendor/babel/localedata/nnh_CM.dat b/vendor/babel/localedata/nnh_CM.dat deleted file mode 100644 index 074603a4..00000000 Binary files 
a/vendor/babel/localedata/nnh_CM.dat and /dev/null differ diff --git a/vendor/babel/localedata/nr.dat b/vendor/babel/localedata/nr.dat deleted file mode 100644 index 019afc0a..00000000 Binary files a/vendor/babel/localedata/nr.dat and /dev/null differ diff --git a/vendor/babel/localedata/nr_ZA.dat b/vendor/babel/localedata/nr_ZA.dat deleted file mode 100644 index 35690913..00000000 --- a/vendor/babel/localedata/nr_ZA.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. \ No newline at end of file diff --git a/vendor/babel/localedata/nso.dat b/vendor/babel/localedata/nso.dat deleted file mode 100644 index 0a37bd06..00000000 Binary files a/vendor/babel/localedata/nso.dat and /dev/null differ diff --git a/vendor/babel/localedata/nso_ZA.dat b/vendor/babel/localedata/nso_ZA.dat deleted file mode 100644 index 35690913..00000000 --- a/vendor/babel/localedata/nso_ZA.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. \ No newline at end of file diff --git a/vendor/babel/localedata/nus.dat b/vendor/babel/localedata/nus.dat deleted file mode 100644 index 3ad9c0cc..00000000 Binary files a/vendor/babel/localedata/nus.dat and /dev/null differ diff --git a/vendor/babel/localedata/nus_SD.dat b/vendor/babel/localedata/nus_SD.dat deleted file mode 100644 index 01c29af9..00000000 --- a/vendor/babel/localedata/nus_SD.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/nyn.dat b/vendor/babel/localedata/nyn.dat deleted file mode 100644 index f13b314b..00000000 Binary files a/vendor/babel/localedata/nyn.dat and /dev/null differ diff --git a/vendor/babel/localedata/nyn_UG.dat b/vendor/babel/localedata/nyn_UG.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/nyn_UG.dat and /dev/null differ diff --git a/vendor/babel/localedata/om.dat b/vendor/babel/localedata/om.dat deleted file mode 100644 index 53213cfe..00000000 Binary files a/vendor/babel/localedata/om.dat and /dev/null differ diff --git a/vendor/babel/localedata/om_ET.dat b/vendor/babel/localedata/om_ET.dat deleted file mode 100644 index 35690913..00000000 --- a/vendor/babel/localedata/om_ET.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. \ No newline at end of file diff --git a/vendor/babel/localedata/om_KE.dat b/vendor/babel/localedata/om_KE.dat deleted file mode 100644 index b126edd7..00000000 Binary files a/vendor/babel/localedata/om_KE.dat and /dev/null differ diff --git a/vendor/babel/localedata/or.dat b/vendor/babel/localedata/or.dat deleted file mode 100644 index c01ab442..00000000 Binary files a/vendor/babel/localedata/or.dat and /dev/null differ diff --git a/vendor/babel/localedata/or_IN.dat b/vendor/babel/localedata/or_IN.dat deleted file mode 100644 index 1564619c..00000000 --- a/vendor/babel/localedata/or_IN.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (U weekend_startqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/os.dat b/vendor/babel/localedata/os.dat deleted file mode 100644 index 0a01ac8b..00000000 Binary files a/vendor/babel/localedata/os.dat and /dev/null differ diff --git a/vendor/babel/localedata/os_GE.dat b/vendor/babel/localedata/os_GE.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/os_GE.dat and /dev/null differ diff --git a/vendor/babel/localedata/os_RU.dat b/vendor/babel/localedata/os_RU.dat deleted file mode 100644 index b1b0d843..00000000 Binary files a/vendor/babel/localedata/os_RU.dat and /dev/null differ diff --git a/vendor/babel/localedata/pa.dat b/vendor/babel/localedata/pa.dat deleted file mode 100644 index 100c3779..00000000 Binary files a/vendor/babel/localedata/pa.dat and /dev/null differ diff --git a/vendor/babel/localedata/pa_Arab.dat b/vendor/babel/localedata/pa_Arab.dat deleted file mode 100644 index b32025cf..00000000 Binary files a/vendor/babel/localedata/pa_Arab.dat and /dev/null differ diff --git a/vendor/babel/localedata/pa_Arab_PK.dat b/vendor/babel/localedata/pa_Arab_PK.dat deleted file mode 100644 index 81c89254..00000000 --- a/vendor/babel/localedata/pa_Arab_PK.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U first_dayqKsU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}q Ucurrency_namesq!}q"U unit_patternsq#}q$u. \ No newline at end of file diff --git a/vendor/babel/localedata/pa_Guru.dat b/vendor/babel/localedata/pa_Guru.dat deleted file mode 100644 index 27760a1c..00000000 Binary files a/vendor/babel/localedata/pa_Guru.dat and /dev/null differ diff --git a/vendor/babel/localedata/pa_Guru_IN.dat b/vendor/babel/localedata/pa_Guru_IN.dat deleted file mode 100644 index 1564619c..00000000 --- a/vendor/babel/localedata/pa_Guru_IN.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (U weekend_startqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/pl.dat b/vendor/babel/localedata/pl.dat deleted file mode 100644 index 11abe814..00000000 Binary files a/vendor/babel/localedata/pl.dat and /dev/null differ diff --git a/vendor/babel/localedata/pl_PL.dat b/vendor/babel/localedata/pl_PL.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/pl_PL.dat and /dev/null differ diff --git a/vendor/babel/localedata/ps.dat b/vendor/babel/localedata/ps.dat deleted file mode 100644 index 6521e4b3..00000000 Binary files a/vendor/babel/localedata/ps.dat and /dev/null differ diff --git a/vendor/babel/localedata/ps_AF.dat b/vendor/babel/localedata/ps_AF.dat deleted file mode 100644 index 150c7e3b..00000000 --- a/vendor/babel/localedata/ps_AF.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (U weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}q Uvariantsq!}q"Ucurrency_namesq#}q$U unit_patternsq%}q&u. \ No newline at end of file diff --git a/vendor/babel/localedata/pt.dat b/vendor/babel/localedata/pt.dat deleted file mode 100644 index bf22a14e..00000000 Binary files a/vendor/babel/localedata/pt.dat and /dev/null differ diff --git a/vendor/babel/localedata/pt_AO.dat b/vendor/babel/localedata/pt_AO.dat deleted file mode 100644 index cd4cd79d..00000000 Binary files a/vendor/babel/localedata/pt_AO.dat and /dev/null differ diff --git a/vendor/babel/localedata/pt_BR.dat b/vendor/babel/localedata/pt_BR.dat deleted file mode 100644 index 81c89254..00000000 --- a/vendor/babel/localedata/pt_BR.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U first_dayqKsU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}q Ucurrency_namesq!}q"U unit_patternsq#}q$u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/pt_CV.dat b/vendor/babel/localedata/pt_CV.dat deleted file mode 100644 index 98f10615..00000000 Binary files a/vendor/babel/localedata/pt_CV.dat and /dev/null differ diff --git a/vendor/babel/localedata/pt_GW.dat b/vendor/babel/localedata/pt_GW.dat deleted file mode 100644 index 98f10615..00000000 Binary files a/vendor/babel/localedata/pt_GW.dat and /dev/null differ diff --git a/vendor/babel/localedata/pt_MO.dat b/vendor/babel/localedata/pt_MO.dat deleted file mode 100644 index d40c46f1..00000000 Binary files a/vendor/babel/localedata/pt_MO.dat and /dev/null differ diff --git a/vendor/babel/localedata/pt_MZ.dat b/vendor/babel/localedata/pt_MZ.dat deleted file mode 100644 index bfd25510..00000000 Binary files a/vendor/babel/localedata/pt_MZ.dat and /dev/null differ diff --git a/vendor/babel/localedata/pt_PT.dat b/vendor/babel/localedata/pt_PT.dat deleted file mode 100644 index 203b692c..00000000 Binary files a/vendor/babel/localedata/pt_PT.dat and /dev/null differ diff --git a/vendor/babel/localedata/pt_ST.dat b/vendor/babel/localedata/pt_ST.dat deleted file mode 100644 index 30882bde..00000000 Binary files a/vendor/babel/localedata/pt_ST.dat and /dev/null differ diff --git a/vendor/babel/localedata/pt_TL.dat b/vendor/babel/localedata/pt_TL.dat deleted file mode 100644 index 6b5a0f9f..00000000 Binary files a/vendor/babel/localedata/pt_TL.dat and /dev/null differ diff --git a/vendor/babel/localedata/rm.dat b/vendor/babel/localedata/rm.dat deleted file mode 100644 index c03c5aed..00000000 Binary files a/vendor/babel/localedata/rm.dat and /dev/null differ diff --git a/vendor/babel/localedata/rm_CH.dat b/vendor/babel/localedata/rm_CH.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/rm_CH.dat and /dev/null differ diff --git a/vendor/babel/localedata/rn.dat b/vendor/babel/localedata/rn.dat deleted file mode 100644 index 50efb2e9..00000000 Binary files a/vendor/babel/localedata/rn.dat and /dev/null differ diff --git a/vendor/babel/localedata/rn_BI.dat b/vendor/babel/localedata/rn_BI.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/rn_BI.dat and /dev/null differ diff --git a/vendor/babel/localedata/ro.dat b/vendor/babel/localedata/ro.dat deleted file mode 100644 index 18ee70fa..00000000 Binary files a/vendor/babel/localedata/ro.dat and /dev/null differ diff --git a/vendor/babel/localedata/ro_MD.dat b/vendor/babel/localedata/ro_MD.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/ro_MD.dat and /dev/null differ diff --git a/vendor/babel/localedata/ro_RO.dat b/vendor/babel/localedata/ro_RO.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/ro_RO.dat and /dev/null differ diff --git a/vendor/babel/localedata/rof.dat b/vendor/babel/localedata/rof.dat deleted file mode 100644 index 28f38de9..00000000 Binary files a/vendor/babel/localedata/rof.dat and /dev/null differ diff --git a/vendor/babel/localedata/rof_TZ.dat b/vendor/babel/localedata/rof_TZ.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/rof_TZ.dat and /dev/null differ diff --git a/vendor/babel/localedata/root.dat b/vendor/babel/localedata/root.dat deleted file mode 100644 index 80ab0eb7..00000000 Binary files a/vendor/babel/localedata/root.dat and /dev/null differ diff --git a/vendor/babel/localedata/ru.dat b/vendor/babel/localedata/ru.dat deleted file mode 100644 index 
fc116df8..00000000 Binary files a/vendor/babel/localedata/ru.dat and /dev/null differ diff --git a/vendor/babel/localedata/ru_BY.dat b/vendor/babel/localedata/ru_BY.dat deleted file mode 100644 index 640cb796..00000000 Binary files a/vendor/babel/localedata/ru_BY.dat and /dev/null differ diff --git a/vendor/babel/localedata/ru_KG.dat b/vendor/babel/localedata/ru_KG.dat deleted file mode 100644 index 87f33896..00000000 Binary files a/vendor/babel/localedata/ru_KG.dat and /dev/null differ diff --git a/vendor/babel/localedata/ru_KZ.dat b/vendor/babel/localedata/ru_KZ.dat deleted file mode 100644 index 62d0b246..00000000 Binary files a/vendor/babel/localedata/ru_KZ.dat and /dev/null differ diff --git a/vendor/babel/localedata/ru_MD.dat b/vendor/babel/localedata/ru_MD.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/ru_MD.dat and /dev/null differ diff --git a/vendor/babel/localedata/ru_RU.dat b/vendor/babel/localedata/ru_RU.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/ru_RU.dat and /dev/null differ diff --git a/vendor/babel/localedata/ru_UA.dat b/vendor/babel/localedata/ru_UA.dat deleted file mode 100644 index c726ba81..00000000 Binary files a/vendor/babel/localedata/ru_UA.dat and /dev/null differ diff --git a/vendor/babel/localedata/rw.dat b/vendor/babel/localedata/rw.dat deleted file mode 100644 index 0403c67b..00000000 Binary files a/vendor/babel/localedata/rw.dat and /dev/null differ diff --git a/vendor/babel/localedata/rw_RW.dat b/vendor/babel/localedata/rw_RW.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/rw_RW.dat and /dev/null differ diff --git a/vendor/babel/localedata/rwk.dat b/vendor/babel/localedata/rwk.dat deleted file mode 100644 index d18d3144..00000000 Binary files a/vendor/babel/localedata/rwk.dat and /dev/null differ diff --git a/vendor/babel/localedata/rwk_TZ.dat b/vendor/babel/localedata/rwk_TZ.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/rwk_TZ.dat and /dev/null differ diff --git a/vendor/babel/localedata/sah.dat b/vendor/babel/localedata/sah.dat deleted file mode 100644 index 38fb037c..00000000 Binary files a/vendor/babel/localedata/sah.dat and /dev/null differ diff --git a/vendor/babel/localedata/sah_RU.dat b/vendor/babel/localedata/sah_RU.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/sah_RU.dat and /dev/null differ diff --git a/vendor/babel/localedata/saq.dat b/vendor/babel/localedata/saq.dat deleted file mode 100644 index 858cb6a0..00000000 Binary files a/vendor/babel/localedata/saq.dat and /dev/null differ diff --git a/vendor/babel/localedata/saq_KE.dat b/vendor/babel/localedata/saq_KE.dat deleted file mode 100644 index 35690913..00000000 --- a/vendor/babel/localedata/saq_KE.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/sbp.dat b/vendor/babel/localedata/sbp.dat deleted file mode 100644 index 36f50811..00000000 Binary files a/vendor/babel/localedata/sbp.dat and /dev/null differ diff --git a/vendor/babel/localedata/sbp_TZ.dat b/vendor/babel/localedata/sbp_TZ.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/sbp_TZ.dat and /dev/null differ diff --git a/vendor/babel/localedata/se.dat b/vendor/babel/localedata/se.dat deleted file mode 100644 index 4093ad3e..00000000 Binary files a/vendor/babel/localedata/se.dat and /dev/null differ diff --git a/vendor/babel/localedata/se_FI.dat b/vendor/babel/localedata/se_FI.dat deleted file mode 100644 index 561ae0fd..00000000 Binary files a/vendor/babel/localedata/se_FI.dat and /dev/null differ diff --git a/vendor/babel/localedata/se_NO.dat b/vendor/babel/localedata/se_NO.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/se_NO.dat and /dev/null differ diff --git a/vendor/babel/localedata/seh.dat b/vendor/babel/localedata/seh.dat deleted file mode 100644 index 63922010..00000000 Binary files a/vendor/babel/localedata/seh.dat and /dev/null differ diff --git a/vendor/babel/localedata/seh_MZ.dat b/vendor/babel/localedata/seh_MZ.dat deleted file mode 100644 index 35690913..00000000 --- a/vendor/babel/localedata/seh_MZ.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. \ No newline at end of file diff --git a/vendor/babel/localedata/ses.dat b/vendor/babel/localedata/ses.dat deleted file mode 100644 index d537b3f8..00000000 Binary files a/vendor/babel/localedata/ses.dat and /dev/null differ diff --git a/vendor/babel/localedata/ses_ML.dat b/vendor/babel/localedata/ses_ML.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/ses_ML.dat and /dev/null differ diff --git a/vendor/babel/localedata/sg.dat b/vendor/babel/localedata/sg.dat deleted file mode 100644 index f7e398c8..00000000 Binary files a/vendor/babel/localedata/sg.dat and /dev/null differ diff --git a/vendor/babel/localedata/sg_CF.dat b/vendor/babel/localedata/sg_CF.dat deleted file mode 100644 index a7cee6f2..00000000 --- a/vendor/babel/localedata/sg_CF.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}qUcurrency_namesq }q!U unit_patternsq"}q#u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/shi.dat b/vendor/babel/localedata/shi.dat deleted file mode 100644 index 8adbb59a..00000000 Binary files a/vendor/babel/localedata/shi.dat and /dev/null differ diff --git a/vendor/babel/localedata/shi_Latn.dat b/vendor/babel/localedata/shi_Latn.dat deleted file mode 100644 index 1cb6147c..00000000 Binary files a/vendor/babel/localedata/shi_Latn.dat and /dev/null differ diff --git a/vendor/babel/localedata/shi_Latn_MA.dat b/vendor/babel/localedata/shi_Latn_MA.dat deleted file mode 100644 index 01c29af9..00000000 --- a/vendor/babel/localedata/shi_Latn_MA.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. \ No newline at end of file diff --git a/vendor/babel/localedata/shi_Tfng.dat b/vendor/babel/localedata/shi_Tfng.dat deleted file mode 100644 index 21966f5f..00000000 Binary files a/vendor/babel/localedata/shi_Tfng.dat and /dev/null differ diff --git a/vendor/babel/localedata/shi_Tfng_MA.dat b/vendor/babel/localedata/shi_Tfng_MA.dat deleted file mode 100644 index 01c29af9..00000000 --- a/vendor/babel/localedata/shi_Tfng_MA.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/si.dat b/vendor/babel/localedata/si.dat deleted file mode 100644 index ecb30a76..00000000 Binary files a/vendor/babel/localedata/si.dat and /dev/null differ diff --git a/vendor/babel/localedata/si_LK.dat b/vendor/babel/localedata/si_LK.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/si_LK.dat and /dev/null differ diff --git a/vendor/babel/localedata/sk.dat b/vendor/babel/localedata/sk.dat deleted file mode 100644 index 59fa86d2..00000000 Binary files a/vendor/babel/localedata/sk.dat and /dev/null differ diff --git a/vendor/babel/localedata/sk_SK.dat b/vendor/babel/localedata/sk_SK.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/sk_SK.dat and /dev/null differ diff --git a/vendor/babel/localedata/sl.dat b/vendor/babel/localedata/sl.dat deleted file mode 100644 index 7810cc35..00000000 Binary files a/vendor/babel/localedata/sl.dat and /dev/null differ diff --git a/vendor/babel/localedata/sl_SI.dat b/vendor/babel/localedata/sl_SI.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/sl_SI.dat and /dev/null differ diff --git a/vendor/babel/localedata/sn.dat b/vendor/babel/localedata/sn.dat deleted file mode 100644 index 29c97a36..00000000 Binary files a/vendor/babel/localedata/sn.dat and /dev/null differ diff --git a/vendor/babel/localedata/sn_ZW.dat b/vendor/babel/localedata/sn_ZW.dat deleted file mode 100644 index 35690913..00000000 --- a/vendor/babel/localedata/sn_ZW.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/so.dat b/vendor/babel/localedata/so.dat deleted file mode 100644 index 7844a445..00000000 Binary files a/vendor/babel/localedata/so.dat and /dev/null differ diff --git a/vendor/babel/localedata/so_DJ.dat b/vendor/babel/localedata/so_DJ.dat deleted file mode 100644 index b0ee4421..00000000 Binary files a/vendor/babel/localedata/so_DJ.dat and /dev/null differ diff --git a/vendor/babel/localedata/so_ET.dat b/vendor/babel/localedata/so_ET.dat deleted file mode 100644 index b4322296..00000000 Binary files a/vendor/babel/localedata/so_ET.dat and /dev/null differ diff --git a/vendor/babel/localedata/so_KE.dat b/vendor/babel/localedata/so_KE.dat deleted file mode 100644 index b126edd7..00000000 Binary files a/vendor/babel/localedata/so_KE.dat and /dev/null differ diff --git a/vendor/babel/localedata/so_SO.dat b/vendor/babel/localedata/so_SO.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/so_SO.dat and /dev/null differ diff --git a/vendor/babel/localedata/sq.dat b/vendor/babel/localedata/sq.dat deleted file mode 100644 index a03fcf0a..00000000 Binary files a/vendor/babel/localedata/sq.dat and /dev/null differ diff --git a/vendor/babel/localedata/sq_AL.dat b/vendor/babel/localedata/sq_AL.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/sq_AL.dat and /dev/null differ diff --git a/vendor/babel/localedata/sq_MK.dat b/vendor/babel/localedata/sq_MK.dat deleted file mode 100644 index 65bfa5c8..00000000 Binary files a/vendor/babel/localedata/sq_MK.dat and /dev/null differ diff --git a/vendor/babel/localedata/sq_XK.dat b/vendor/babel/localedata/sq_XK.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/sq_XK.dat and /dev/null differ diff --git a/vendor/babel/localedata/sr.dat b/vendor/babel/localedata/sr.dat deleted file mode 100644 index ef9cd2cc..00000000 Binary files a/vendor/babel/localedata/sr.dat and /dev/null differ diff --git a/vendor/babel/localedata/sr_Cyrl.dat b/vendor/babel/localedata/sr_Cyrl.dat deleted file mode 100644 index a882c5bc..00000000 Binary files a/vendor/babel/localedata/sr_Cyrl.dat and /dev/null differ diff --git a/vendor/babel/localedata/sr_Cyrl_BA.dat b/vendor/babel/localedata/sr_Cyrl_BA.dat deleted file mode 100644 index 8d9fa9f5..00000000 Binary files a/vendor/babel/localedata/sr_Cyrl_BA.dat and /dev/null differ diff --git a/vendor/babel/localedata/sr_Cyrl_ME.dat b/vendor/babel/localedata/sr_Cyrl_ME.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/sr_Cyrl_ME.dat and /dev/null differ diff --git a/vendor/babel/localedata/sr_Cyrl_RS.dat b/vendor/babel/localedata/sr_Cyrl_RS.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/sr_Cyrl_RS.dat and /dev/null differ diff --git a/vendor/babel/localedata/sr_Cyrl_XK.dat b/vendor/babel/localedata/sr_Cyrl_XK.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/sr_Cyrl_XK.dat and /dev/null differ diff --git a/vendor/babel/localedata/sr_Latn.dat b/vendor/babel/localedata/sr_Latn.dat deleted file mode 100644 index 57b44f1e..00000000 Binary files a/vendor/babel/localedata/sr_Latn.dat and /dev/null differ diff --git a/vendor/babel/localedata/sr_Latn_BA.dat b/vendor/babel/localedata/sr_Latn_BA.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/sr_Latn_BA.dat and /dev/null differ diff --git 
a/vendor/babel/localedata/sr_Latn_ME.dat b/vendor/babel/localedata/sr_Latn_ME.dat deleted file mode 100644 index b0f4d9b3..00000000 Binary files a/vendor/babel/localedata/sr_Latn_ME.dat and /dev/null differ diff --git a/vendor/babel/localedata/sr_Latn_RS.dat b/vendor/babel/localedata/sr_Latn_RS.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/sr_Latn_RS.dat and /dev/null differ diff --git a/vendor/babel/localedata/sr_Latn_XK.dat b/vendor/babel/localedata/sr_Latn_XK.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/sr_Latn_XK.dat and /dev/null differ diff --git a/vendor/babel/localedata/ss.dat b/vendor/babel/localedata/ss.dat deleted file mode 100644 index c7807243..00000000 Binary files a/vendor/babel/localedata/ss.dat and /dev/null differ diff --git a/vendor/babel/localedata/ss_SZ.dat b/vendor/babel/localedata/ss_SZ.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/ss_SZ.dat and /dev/null differ diff --git a/vendor/babel/localedata/ss_ZA.dat b/vendor/babel/localedata/ss_ZA.dat deleted file mode 100644 index 35690913..00000000 --- a/vendor/babel/localedata/ss_ZA.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. \ No newline at end of file diff --git a/vendor/babel/localedata/ssy.dat b/vendor/babel/localedata/ssy.dat deleted file mode 100644 index 926fd85d..00000000 Binary files a/vendor/babel/localedata/ssy.dat and /dev/null differ diff --git a/vendor/babel/localedata/ssy_ER.dat b/vendor/babel/localedata/ssy_ER.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/ssy_ER.dat and /dev/null differ diff --git a/vendor/babel/localedata/st.dat b/vendor/babel/localedata/st.dat deleted file mode 100644 index d19795e3..00000000 Binary files a/vendor/babel/localedata/st.dat and /dev/null differ diff --git a/vendor/babel/localedata/st_LS.dat b/vendor/babel/localedata/st_LS.dat deleted file mode 100644 index 348a1678..00000000 Binary files a/vendor/babel/localedata/st_LS.dat and /dev/null differ diff --git a/vendor/babel/localedata/st_ZA.dat b/vendor/babel/localedata/st_ZA.dat deleted file mode 100644 index 35690913..00000000 --- a/vendor/babel/localedata/st_ZA.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/sv.dat b/vendor/babel/localedata/sv.dat deleted file mode 100644 index bce70f15..00000000 Binary files a/vendor/babel/localedata/sv.dat and /dev/null differ diff --git a/vendor/babel/localedata/sv_AX.dat b/vendor/babel/localedata/sv_AX.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/sv_AX.dat and /dev/null differ diff --git a/vendor/babel/localedata/sv_FI.dat b/vendor/babel/localedata/sv_FI.dat deleted file mode 100644 index f42189dc..00000000 Binary files a/vendor/babel/localedata/sv_FI.dat and /dev/null differ diff --git a/vendor/babel/localedata/sv_SE.dat b/vendor/babel/localedata/sv_SE.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/sv_SE.dat and /dev/null differ diff --git a/vendor/babel/localedata/sw.dat b/vendor/babel/localedata/sw.dat deleted file mode 100644 index 6c525cad..00000000 Binary files a/vendor/babel/localedata/sw.dat and /dev/null differ diff --git a/vendor/babel/localedata/sw_KE.dat b/vendor/babel/localedata/sw_KE.dat deleted file mode 100644 index 3c922118..00000000 Binary files a/vendor/babel/localedata/sw_KE.dat and /dev/null differ diff --git a/vendor/babel/localedata/sw_TZ.dat b/vendor/babel/localedata/sw_TZ.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/sw_TZ.dat and /dev/null differ diff --git a/vendor/babel/localedata/sw_UG.dat b/vendor/babel/localedata/sw_UG.dat deleted file mode 100644 index 3b68eaa1..00000000 Binary files a/vendor/babel/localedata/sw_UG.dat and /dev/null differ diff --git a/vendor/babel/localedata/swc.dat b/vendor/babel/localedata/swc.dat deleted file mode 100644 index 247ea257..00000000 Binary files a/vendor/babel/localedata/swc.dat and /dev/null differ diff --git a/vendor/babel/localedata/swc_CD.dat b/vendor/babel/localedata/swc_CD.dat deleted file mode 100644 index a7cee6f2..00000000 --- a/vendor/babel/localedata/swc_CD.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}qUcurrency_namesq }q!U unit_patternsq"}q#u. \ No newline at end of file diff --git a/vendor/babel/localedata/ta.dat b/vendor/babel/localedata/ta.dat deleted file mode 100644 index 9115227a..00000000 Binary files a/vendor/babel/localedata/ta.dat and /dev/null differ diff --git a/vendor/babel/localedata/ta_IN.dat b/vendor/babel/localedata/ta_IN.dat deleted file mode 100644 index 1564619c..00000000 --- a/vendor/babel/localedata/ta_IN.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (U weekend_startqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/ta_LK.dat b/vendor/babel/localedata/ta_LK.dat deleted file mode 100644 index 482cab29..00000000 Binary files a/vendor/babel/localedata/ta_LK.dat and /dev/null differ diff --git a/vendor/babel/localedata/ta_MY.dat b/vendor/babel/localedata/ta_MY.dat deleted file mode 100644 index d3980d49..00000000 Binary files a/vendor/babel/localedata/ta_MY.dat and /dev/null differ diff --git a/vendor/babel/localedata/ta_SG.dat b/vendor/babel/localedata/ta_SG.dat deleted file mode 100644 index d2b3c0c9..00000000 Binary files a/vendor/babel/localedata/ta_SG.dat and /dev/null differ diff --git a/vendor/babel/localedata/te.dat b/vendor/babel/localedata/te.dat deleted file mode 100644 index 82dde54f..00000000 Binary files a/vendor/babel/localedata/te.dat and /dev/null differ diff --git a/vendor/babel/localedata/te_IN.dat b/vendor/babel/localedata/te_IN.dat deleted file mode 100644 index 1564619c..00000000 --- a/vendor/babel/localedata/te_IN.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (U weekend_startqKU first_dayqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq }q!Ucurrency_namesq"}q#U unit_patternsq$}q%u. \ No newline at end of file diff --git a/vendor/babel/localedata/teo.dat b/vendor/babel/localedata/teo.dat deleted file mode 100644 index 15490c3a..00000000 Binary files a/vendor/babel/localedata/teo.dat and /dev/null differ diff --git a/vendor/babel/localedata/teo_KE.dat b/vendor/babel/localedata/teo_KE.dat deleted file mode 100644 index b126edd7..00000000 Binary files a/vendor/babel/localedata/teo_KE.dat and /dev/null differ diff --git a/vendor/babel/localedata/teo_UG.dat b/vendor/babel/localedata/teo_UG.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/teo_UG.dat and /dev/null differ diff --git a/vendor/babel/localedata/tg.dat b/vendor/babel/localedata/tg.dat deleted file mode 100644 index 2ca7e3c8..00000000 Binary files a/vendor/babel/localedata/tg.dat and /dev/null differ diff --git a/vendor/babel/localedata/tg_Cyrl.dat b/vendor/babel/localedata/tg_Cyrl.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/tg_Cyrl.dat and /dev/null differ diff --git a/vendor/babel/localedata/tg_Cyrl_TJ.dat b/vendor/babel/localedata/tg_Cyrl_TJ.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/tg_Cyrl_TJ.dat and /dev/null differ diff --git a/vendor/babel/localedata/th.dat b/vendor/babel/localedata/th.dat deleted file mode 100644 index e052aa59..00000000 Binary files a/vendor/babel/localedata/th.dat and /dev/null differ diff --git a/vendor/babel/localedata/th_TH.dat b/vendor/babel/localedata/th_TH.dat deleted file mode 100644 index 81c89254..00000000 --- a/vendor/babel/localedata/th_TH.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U first_dayqKsU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}q Ucurrency_namesq!}q"U unit_patternsq#}q$u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/ti.dat b/vendor/babel/localedata/ti.dat deleted file mode 100644 index b5ce842b..00000000 Binary files a/vendor/babel/localedata/ti.dat and /dev/null differ diff --git a/vendor/babel/localedata/ti_ER.dat b/vendor/babel/localedata/ti_ER.dat deleted file mode 100644 index 1e2f1e30..00000000 Binary files a/vendor/babel/localedata/ti_ER.dat and /dev/null differ diff --git a/vendor/babel/localedata/ti_ET.dat b/vendor/babel/localedata/ti_ET.dat deleted file mode 100644 index 35690913..00000000 --- a/vendor/babel/localedata/ti_ET.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. \ No newline at end of file diff --git a/vendor/babel/localedata/tig.dat b/vendor/babel/localedata/tig.dat deleted file mode 100644 index 8e82e8f6..00000000 Binary files a/vendor/babel/localedata/tig.dat and /dev/null differ diff --git a/vendor/babel/localedata/tig_ER.dat b/vendor/babel/localedata/tig_ER.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/tig_ER.dat and /dev/null differ diff --git a/vendor/babel/localedata/tn.dat b/vendor/babel/localedata/tn.dat deleted file mode 100644 index d1a30428..00000000 Binary files a/vendor/babel/localedata/tn.dat and /dev/null differ diff --git a/vendor/babel/localedata/tn_BW.dat b/vendor/babel/localedata/tn_BW.dat deleted file mode 100644 index 325d2b09..00000000 Binary files a/vendor/babel/localedata/tn_BW.dat and /dev/null differ diff --git a/vendor/babel/localedata/tn_ZA.dat b/vendor/babel/localedata/tn_ZA.dat deleted file mode 100644 index 35690913..00000000 --- a/vendor/babel/localedata/tn_ZA.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/to.dat b/vendor/babel/localedata/to.dat deleted file mode 100644 index 80c43614..00000000 Binary files a/vendor/babel/localedata/to.dat and /dev/null differ diff --git a/vendor/babel/localedata/to_TO.dat b/vendor/babel/localedata/to_TO.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/to_TO.dat and /dev/null differ diff --git a/vendor/babel/localedata/tr.dat b/vendor/babel/localedata/tr.dat deleted file mode 100644 index 292d0071..00000000 Binary files a/vendor/babel/localedata/tr.dat and /dev/null differ diff --git a/vendor/babel/localedata/tr_CY.dat b/vendor/babel/localedata/tr_CY.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/tr_CY.dat and /dev/null differ diff --git a/vendor/babel/localedata/tr_TR.dat b/vendor/babel/localedata/tr_TR.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/tr_TR.dat and /dev/null differ diff --git a/vendor/babel/localedata/ts.dat b/vendor/babel/localedata/ts.dat deleted file mode 100644 index c24c9e40..00000000 Binary files a/vendor/babel/localedata/ts.dat and /dev/null differ diff --git a/vendor/babel/localedata/ts_ZA.dat b/vendor/babel/localedata/ts_ZA.dat deleted file mode 100644 index 35690913..00000000 --- a/vendor/babel/localedata/ts_ZA.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. \ No newline at end of file diff --git a/vendor/babel/localedata/twq.dat b/vendor/babel/localedata/twq.dat deleted file mode 100644 index f9c3773a..00000000 Binary files a/vendor/babel/localedata/twq.dat and /dev/null differ diff --git a/vendor/babel/localedata/twq_NE.dat b/vendor/babel/localedata/twq_NE.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/twq_NE.dat and /dev/null differ diff --git a/vendor/babel/localedata/tzm.dat b/vendor/babel/localedata/tzm.dat deleted file mode 100644 index 2543fb39..00000000 Binary files a/vendor/babel/localedata/tzm.dat and /dev/null differ diff --git a/vendor/babel/localedata/tzm_Latn.dat b/vendor/babel/localedata/tzm_Latn.dat deleted file mode 100644 index 500194c5..00000000 Binary files a/vendor/babel/localedata/tzm_Latn.dat and /dev/null differ diff --git a/vendor/babel/localedata/tzm_Latn_MA.dat b/vendor/babel/localedata/tzm_Latn_MA.dat deleted file mode 100644 index 01c29af9..00000000 --- a/vendor/babel/localedata/tzm_Latn_MA.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/uk.dat b/vendor/babel/localedata/uk.dat deleted file mode 100644 index 2ce73b76..00000000 Binary files a/vendor/babel/localedata/uk.dat and /dev/null differ diff --git a/vendor/babel/localedata/uk_UA.dat b/vendor/babel/localedata/uk_UA.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/uk_UA.dat and /dev/null differ diff --git a/vendor/babel/localedata/ur.dat b/vendor/babel/localedata/ur.dat deleted file mode 100644 index 325b5415..00000000 Binary files a/vendor/babel/localedata/ur.dat and /dev/null differ diff --git a/vendor/babel/localedata/ur_IN.dat b/vendor/babel/localedata/ur_IN.dat deleted file mode 100644 index a8f52387..00000000 Binary files a/vendor/babel/localedata/ur_IN.dat and /dev/null differ diff --git a/vendor/babel/localedata/ur_PK.dat b/vendor/babel/localedata/ur_PK.dat deleted file mode 100644 index 81c89254..00000000 --- a/vendor/babel/localedata/ur_PK.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U first_dayqKsU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}q Ucurrency_namesq!}q"U unit_patternsq#}q$u. \ No newline at end of file diff --git a/vendor/babel/localedata/uz.dat b/vendor/babel/localedata/uz.dat deleted file mode 100644 index 799049cf..00000000 Binary files a/vendor/babel/localedata/uz.dat and /dev/null differ diff --git a/vendor/babel/localedata/uz_Arab.dat b/vendor/babel/localedata/uz_Arab.dat deleted file mode 100644 index 29426be7..00000000 Binary files a/vendor/babel/localedata/uz_Arab.dat and /dev/null differ diff --git a/vendor/babel/localedata/uz_Arab_AF.dat b/vendor/babel/localedata/uz_Arab_AF.dat deleted file mode 100644 index 150c7e3b..00000000 --- a/vendor/babel/localedata/uz_Arab_AF.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (U weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}q Uvariantsq!}q"Ucurrency_namesq#}q$U unit_patternsq%}q&u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/uz_Cyrl.dat b/vendor/babel/localedata/uz_Cyrl.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/uz_Cyrl.dat and /dev/null differ diff --git a/vendor/babel/localedata/uz_Cyrl_UZ.dat b/vendor/babel/localedata/uz_Cyrl_UZ.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/uz_Cyrl_UZ.dat and /dev/null differ diff --git a/vendor/babel/localedata/uz_Latn.dat b/vendor/babel/localedata/uz_Latn.dat deleted file mode 100644 index febdc7c3..00000000 Binary files a/vendor/babel/localedata/uz_Latn.dat and /dev/null differ diff --git a/vendor/babel/localedata/uz_Latn_UZ.dat b/vendor/babel/localedata/uz_Latn_UZ.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/uz_Latn_UZ.dat and /dev/null differ diff --git a/vendor/babel/localedata/vai.dat b/vendor/babel/localedata/vai.dat deleted file mode 100644 index 8decb6fe..00000000 Binary files a/vendor/babel/localedata/vai.dat and /dev/null differ diff --git a/vendor/babel/localedata/vai_Latn.dat b/vendor/babel/localedata/vai_Latn.dat deleted file mode 100644 index 8b6be240..00000000 Binary files a/vendor/babel/localedata/vai_Latn.dat and /dev/null differ diff --git a/vendor/babel/localedata/vai_Latn_LR.dat b/vendor/babel/localedata/vai_Latn_LR.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/vai_Latn_LR.dat and /dev/null differ diff --git a/vendor/babel/localedata/vai_Vaii.dat b/vendor/babel/localedata/vai_Vaii.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/vai_Vaii.dat and /dev/null differ diff --git a/vendor/babel/localedata/vai_Vaii_LR.dat b/vendor/babel/localedata/vai_Vaii_LR.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/vai_Vaii_LR.dat and /dev/null differ diff --git a/vendor/babel/localedata/ve.dat b/vendor/babel/localedata/ve.dat deleted file mode 100644 index a547fffa..00000000 Binary files a/vendor/babel/localedata/ve.dat and /dev/null differ diff --git a/vendor/babel/localedata/ve_ZA.dat b/vendor/babel/localedata/ve_ZA.dat deleted file mode 100644 index 35690913..00000000 --- a/vendor/babel/localedata/ve_ZA.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/vi.dat b/vendor/babel/localedata/vi.dat deleted file mode 100644 index 2c1eb2d9..00000000 Binary files a/vendor/babel/localedata/vi.dat and /dev/null differ diff --git a/vendor/babel/localedata/vi_VN.dat b/vendor/babel/localedata/vi_VN.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/vi_VN.dat and /dev/null differ diff --git a/vendor/babel/localedata/vo.dat b/vendor/babel/localedata/vo.dat deleted file mode 100644 index 979ea71e..00000000 Binary files a/vendor/babel/localedata/vo.dat and /dev/null differ diff --git a/vendor/babel/localedata/vun.dat b/vendor/babel/localedata/vun.dat deleted file mode 100644 index f023b138..00000000 Binary files a/vendor/babel/localedata/vun.dat and /dev/null differ diff --git a/vendor/babel/localedata/vun_TZ.dat b/vendor/babel/localedata/vun_TZ.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/vun_TZ.dat and /dev/null differ diff --git a/vendor/babel/localedata/wae.dat b/vendor/babel/localedata/wae.dat deleted file mode 100644 index 98d0869e..00000000 Binary files a/vendor/babel/localedata/wae.dat and /dev/null differ diff --git a/vendor/babel/localedata/wae_CH.dat b/vendor/babel/localedata/wae_CH.dat deleted file mode 100644 index f981f107..00000000 Binary files a/vendor/babel/localedata/wae_CH.dat and /dev/null differ diff --git a/vendor/babel/localedata/wal.dat b/vendor/babel/localedata/wal.dat deleted file mode 100644 index 39a5a73b..00000000 Binary files a/vendor/babel/localedata/wal.dat and /dev/null differ diff --git a/vendor/babel/localedata/wal_ET.dat b/vendor/babel/localedata/wal_ET.dat deleted file mode 100644 index 35690913..00000000 --- a/vendor/babel/localedata/wal_ET.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. \ No newline at end of file diff --git a/vendor/babel/localedata/xh.dat b/vendor/babel/localedata/xh.dat deleted file mode 100644 index 86f8bdc3..00000000 Binary files a/vendor/babel/localedata/xh.dat and /dev/null differ diff --git a/vendor/babel/localedata/xh_ZA.dat b/vendor/babel/localedata/xh_ZA.dat deleted file mode 100644 index 35690913..00000000 --- a/vendor/babel/localedata/xh_ZA.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/xog.dat b/vendor/babel/localedata/xog.dat deleted file mode 100644 index f660164e..00000000 Binary files a/vendor/babel/localedata/xog.dat and /dev/null differ diff --git a/vendor/babel/localedata/xog_UG.dat b/vendor/babel/localedata/xog_UG.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/xog_UG.dat and /dev/null differ diff --git a/vendor/babel/localedata/yav.dat b/vendor/babel/localedata/yav.dat deleted file mode 100644 index 43d7148f..00000000 Binary files a/vendor/babel/localedata/yav.dat and /dev/null differ diff --git a/vendor/babel/localedata/yav_CM.dat b/vendor/babel/localedata/yav_CM.dat deleted file mode 100644 index 074603a4..00000000 Binary files a/vendor/babel/localedata/yav_CM.dat and /dev/null differ diff --git a/vendor/babel/localedata/yo.dat b/vendor/babel/localedata/yo.dat deleted file mode 100644 index 46120718..00000000 Binary files a/vendor/babel/localedata/yo.dat and /dev/null differ diff --git a/vendor/babel/localedata/yo_NG.dat b/vendor/babel/localedata/yo_NG.dat deleted file mode 100644 index 67b749dd..00000000 Binary files a/vendor/babel/localedata/yo_NG.dat and /dev/null differ diff --git a/vendor/babel/localedata/zh.dat b/vendor/babel/localedata/zh.dat deleted file mode 100644 index cb916a89..00000000 Binary files a/vendor/babel/localedata/zh.dat and /dev/null differ diff --git a/vendor/babel/localedata/zh_Hans.dat b/vendor/babel/localedata/zh_Hans.dat deleted file mode 100644 index 980ab6b6..00000000 Binary files a/vendor/babel/localedata/zh_Hans.dat and /dev/null differ diff --git a/vendor/babel/localedata/zh_Hans_CN.dat b/vendor/babel/localedata/zh_Hans_CN.dat deleted file mode 100644 index 81c89254..00000000 --- a/vendor/babel/localedata/zh_Hans_CN.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U first_dayqKsU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}q Ucurrency_namesq!}q"U unit_patternsq#}q$u. 
\ No newline at end of file diff --git a/vendor/babel/localedata/zh_Hans_HK.dat b/vendor/babel/localedata/zh_Hans_HK.dat deleted file mode 100644 index 3d1a8484..00000000 Binary files a/vendor/babel/localedata/zh_Hans_HK.dat and /dev/null differ diff --git a/vendor/babel/localedata/zh_Hans_MO.dat b/vendor/babel/localedata/zh_Hans_MO.dat deleted file mode 100644 index 7caa1604..00000000 Binary files a/vendor/babel/localedata/zh_Hans_MO.dat and /dev/null differ diff --git a/vendor/babel/localedata/zh_Hans_SG.dat b/vendor/babel/localedata/zh_Hans_SG.dat deleted file mode 100644 index 7c57b7bf..00000000 Binary files a/vendor/babel/localedata/zh_Hans_SG.dat and /dev/null differ diff --git a/vendor/babel/localedata/zh_Hant.dat b/vendor/babel/localedata/zh_Hant.dat deleted file mode 100644 index 6e093dd2..00000000 Binary files a/vendor/babel/localedata/zh_Hant.dat and /dev/null differ diff --git a/vendor/babel/localedata/zh_Hant_HK.dat b/vendor/babel/localedata/zh_Hant_HK.dat deleted file mode 100644 index f5a59a1e..00000000 Binary files a/vendor/babel/localedata/zh_Hant_HK.dat and /dev/null differ diff --git a/vendor/babel/localedata/zh_Hant_MO.dat b/vendor/babel/localedata/zh_Hant_MO.dat deleted file mode 100644 index 7ce4ddc0..00000000 Binary files a/vendor/babel/localedata/zh_Hant_MO.dat and /dev/null differ diff --git a/vendor/babel/localedata/zh_Hant_TW.dat b/vendor/babel/localedata/zh_Hant_TW.dat deleted file mode 100644 index 81c89254..00000000 --- a/vendor/babel/localedata/zh_Hant_TW.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q U first_dayqKsU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq}qUvariantsq}q Ucurrency_namesq!}q"U unit_patternsq#}q$u. \ No newline at end of file diff --git a/vendor/babel/localedata/zu.dat b/vendor/babel/localedata/zu.dat deleted file mode 100644 index 99b984e8..00000000 Binary files a/vendor/babel/localedata/zu.dat and /dev/null differ diff --git a/vendor/babel/localedata/zu_ZA.dat b/vendor/babel/localedata/zu_ZA.dat deleted file mode 100644 index 35690913..00000000 --- a/vendor/babel/localedata/zu_ZA.dat +++ /dev/null @@ -1,4 +0,0 @@ -€}q(Ucurrency_symbolsq}qUscientific_formatsq}qUpercent_formatsq}qUnumber_symbolsq}q Ucurrency_names_pluralq -}q U week_dataq }q (Umin_daysqKU weekend_startqKU first_dayqKU weekend_endqKuU zone_formatsq}qUcurrency_formatsq}qU_versionqM5 U languagesq}qU territoriesq}U -time_zonesq}qUscriptsq}qUdecimal_formatsq}qU -meta_zonesq }q!Uvariantsq"}q#Ucurrency_namesq$}q%U unit_patternsq&}q'u. \ No newline at end of file diff --git a/vendor/babel/localtime/__init__.py b/vendor/babel/localtime/__init__.py deleted file mode 100644 index cdb3e9b5..00000000 --- a/vendor/babel/localtime/__init__.py +++ /dev/null @@ -1,76 +0,0 @@ -# -*- coding: utf-8 -*- -""" - babel.localtime - ~~~~~~~~~~~~~~~ - - Babel specific fork of tzlocal to determine the local timezone - of the system. - - :copyright: (c) 2013 by the Babel Team. - :license: BSD, see LICENSE for more details. 
-""" - -import sys -import pytz -import time -from datetime import timedelta, datetime -from datetime import tzinfo -from threading import RLock - -if sys.platform == 'win32': - from babel.localtime._win32 import _get_localzone -else: - from babel.localtime._unix import _get_localzone - - -_cached_tz = None -_cache_lock = RLock() - -STDOFFSET = timedelta(seconds = -time.timezone) -if time.daylight: - DSTOFFSET = timedelta(seconds = -time.altzone) -else: - DSTOFFSET = STDOFFSET - -DSTDIFF = DSTOFFSET - STDOFFSET -ZERO = timedelta(0) - - -class _FallbackLocalTimezone(tzinfo): - - def utcoffset(self, dt): - if self._isdst(dt): - return DSTOFFSET - else: - return STDOFFSET - - def dst(self, dt): - if self._isdst(dt): - return DSTDIFF - else: - return ZERO - - def tzname(self, dt): - return time.tzname[self._isdst(dt)] - - def _isdst(self, dt): - tt = (dt.year, dt.month, dt.day, - dt.hour, dt.minute, dt.second, - dt.weekday(), 0, -1) - stamp = time.mktime(tt) - tt = time.localtime(stamp) - return tt.tm_isdst > 0 - - -def get_localzone(): - """Returns the current underlying local timezone object. - Generally this function does not need to be used, it's a - better idea to use the :data:`LOCALTZ` singleton instead. - """ - return _get_localzone() - - -try: - LOCALTZ = get_localzone() -except pytz.UnknownTimeZoneError: - LOCALTZ = _FallbackLocalTimezone() diff --git a/vendor/babel/localtime/_unix.py b/vendor/babel/localtime/_unix.py deleted file mode 100644 index b4a3b599..00000000 --- a/vendor/babel/localtime/_unix.py +++ /dev/null @@ -1,137 +0,0 @@ -from __future__ import with_statement -import os -import re -import sys -import pytz -import subprocess - -_systemconfig_tz = re.compile(r'^Time Zone: (.*)$(?m)') - - -def _tz_from_env(tzenv): - if tzenv[0] == ':': - tzenv = tzenv[1:] - - # TZ specifies a file - if os.path.exists(tzenv): - with open(tzenv, 'rb') as tzfile: - return pytz.tzfile.build_tzinfo('local', tzfile) - - # TZ specifies a zoneinfo zone. - try: - tz = pytz.timezone(tzenv) - # That worked, so we return this: - return tz - except pytz.UnknownTimeZoneError: - raise pytz.UnknownTimeZoneError( - "tzlocal() does not support non-zoneinfo timezones like %s. \n" - "Please use a timezone in the form of Continent/City") - -def _get_localzone(_root='/'): - """Tries to find the local timezone configuration. - This method prefers finding the timezone name and passing that to pytz, - over passing in the localtime file, as in the later case the zoneinfo - name is unknown. - The parameter _root makes the function look for files like /etc/localtime - beneath the _root directory. This is primarily used by the tests. - In normal usage you call the function without parameters. - """ - - tzenv = os.environ.get('TZ') - if tzenv: - return _tz_from_env(tzenv) - - # This is actually a pretty reliable way to test for the local time - # zone on operating systems like OS X. On OS X especially this is the - # only one that actually works. - try: - link_dst = os.readlink('/etc/localtime') - except OSError: - pass - else: - pos = link_dst.find('/zoneinfo/') - if pos >= 0: - zone_name = link_dst[pos + 10:] - try: - return pytz.timezone(zone_name) - except pytz.UnknownTimeZoneError: - pass - - # If we are on OS X now we are pretty sure that the rest of the - # code will fail and just fall through until it hits the reading - # of /etc/localtime and using it without name. At this point we - # can invoke systemconfig which internally invokes ICU. 
ICU itself - # does the same thing we do (readlink + compare file contents) but - # since it knows where the zone files are that should be a bit - # better than reimplementing the logic here. - if sys.platform == 'darwin': - c = subprocess.Popen(['systemsetup', '-gettimezone'], - stdout=subprocess.PIPE) - sys_result = c.communicate()[0] - c.wait() - tz_match = _systemconfig_tz.search(sys_result) - if tz_match is not None: - zone_name = tz_match.group(1) - try: - return pytz.timezone(zone_name) - except pytz.UnknownTimeZoneError: - pass - - # Now look for distribution specific configuration files - # that contain the timezone name. - tzpath = os.path.join(_root, 'etc/timezone') - if os.path.exists(tzpath): - with open(tzpath, 'rb') as tzfile: - data = tzfile.read() - - # Issue #3 in tzlocal was that /etc/timezone was a zoneinfo file. - # That's a misconfiguration, but we need to handle it gracefully: - if data[:5] != 'TZif2': - etctz = data.strip().decode() - # Get rid of host definitions and comments: - if ' ' in etctz: - etctz, dummy = etctz.split(' ', 1) - if '#' in etctz: - etctz, dummy = etctz.split('#', 1) - return pytz.timezone(etctz.replace(' ', '_')) - - # CentOS has a ZONE setting in /etc/sysconfig/clock, - # OpenSUSE has a TIMEZONE setting in /etc/sysconfig/clock and - # Gentoo has a TIMEZONE setting in /etc/conf.d/clock - # We look through these files for a timezone: - zone_re = re.compile('\s*ZONE\s*=\s*\"') - timezone_re = re.compile('\s*TIMEZONE\s*=\s*\"') - end_re = re.compile('\"') - - for filename in ('etc/sysconfig/clock', 'etc/conf.d/clock'): - tzpath = os.path.join(_root, filename) - if not os.path.exists(tzpath): - continue - with open(tzpath, 'rt') as tzfile: - data = tzfile.readlines() - - for line in data: - # Look for the ZONE= setting. - match = zone_re.match(line) - if match is None: - # No ZONE= setting. Look for the TIMEZONE= setting. - match = timezone_re.match(line) - if match is not None: - # Some setting existed - line = line[match.end():] - etctz = line[:end_re.search(line).start()] - - # We found a timezone - return pytz.timezone(etctz.replace(' ', '_')) - - # No explicit setting existed. Use localtime - for filename in ('etc/localtime', 'usr/local/etc/localtime'): - tzpath = os.path.join(_root, filename) - - if not os.path.exists(tzpath): - continue - - with open(tzpath, 'rb') as tzfile: - return pytz.tzfile.build_tzinfo('local', tzfile) - - raise pytz.UnknownTimeZoneError('Can not find any timezone configuration') diff --git a/vendor/babel/localtime/_win32.py b/vendor/babel/localtime/_win32.py deleted file mode 100644 index 1f6ecc7c..00000000 --- a/vendor/babel/localtime/_win32.py +++ /dev/null @@ -1,89 +0,0 @@ -try: - import _winreg as winreg -except ImportError: - try: - import winreg - except ImportError: - winreg = None - -from babel.core import get_global -import pytz - - -tz_names = get_global('windows_zone_mapping') - - -def valuestodict(key): - """Convert a registry key's values to a dictionary.""" - dict = {} - size = winreg.QueryInfoKey(key)[1] - for i in range(size): - data = winreg.EnumValue(key, i) - dict[data[0]] = data[1] - return dict - - -def get_localzone_name(): - # Windows is special. It has unique time zone names (in several - # meanings of the word) available, but unfortunately, they can be - # translated to the language of the operating system, so we need to - # do a backwards lookup, by going through all time zones and see which - # one matches. 
- handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) - - TZLOCALKEYNAME = r'SYSTEM\CurrentControlSet\Control\TimeZoneInformation' - localtz = winreg.OpenKey(handle, TZLOCALKEYNAME) - keyvalues = valuestodict(localtz) - localtz.Close() - if 'TimeZoneKeyName' in keyvalues: - # Windows 7 (and Vista?) - - # For some reason this returns a string with loads of NUL bytes at - # least on some systems. I don't know if this is a bug somewhere, I - # just work around it. - tzkeyname = keyvalues['TimeZoneKeyName'].split('\x00', 1)[0] - else: - # Windows 2000 or XP - - # This is the localized name: - tzwin = keyvalues['StandardName'] - - # Open the list of timezones to look up the real name: - TZKEYNAME = r'SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones' - tzkey = winreg.OpenKey(handle, TZKEYNAME) - - # Now, match this value to Time Zone information - tzkeyname = None - for i in range(winreg.QueryInfoKey(tzkey)[0]): - subkey = winreg.EnumKey(tzkey, i) - sub = winreg.OpenKey(tzkey, subkey) - data = valuestodict(sub) - sub.Close() - if data['Std'] == tzwin: - tzkeyname = subkey - break - - tzkey.Close() - handle.Close() - - if tzkeyname is None: - raise LookupError('Can not find Windows timezone configuration') - - timezone = tz_names.get(tzkeyname) - if timezone is None: - # Nope, that didn't work. Try adding 'Standard Time', - # it seems to work a lot of times: - timezone = tz_names.get(tzkeyname + ' Standard Time') - - # Return what we have. - if timezone is None: - raise pytz.UnknownTimeZoneError('Can not find timezone ' + tzkeyname) - - return timezone - - -def _get_localzone(): - if winreg is None: - raise pytz.UnknownTimeZoneError( - 'Runtime support not available') - return pytz.timezone(get_localzone_name()) diff --git a/vendor/babel/messages/__init__.py b/vendor/babel/messages/__init__.py deleted file mode 100644 index 1b63bae2..00000000 --- a/vendor/babel/messages/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# -*- coding: utf-8 -*- -""" - babel.messages - ~~~~~~~~~~~~~~ - - Support for ``gettext`` message catalogs. - - :copyright: (c) 2013 by the Babel Team. - :license: BSD, see LICENSE for more details. -""" - -from babel.messages.catalog import * diff --git a/vendor/babel/messages/catalog.py b/vendor/babel/messages/catalog.py deleted file mode 100644 index 501763b5..00000000 --- a/vendor/babel/messages/catalog.py +++ /dev/null @@ -1,831 +0,0 @@ -# -*- coding: utf-8 -*- -""" - babel.messages.catalog - ~~~~~~~~~~~~~~~~~~~~~~ - - Data structures for message catalogs. - - :copyright: (c) 2013 by the Babel Team. - :license: BSD, see LICENSE for more details. -""" - -import re -import time - -from cgi import parse_header -from datetime import datetime, time as time_ -from difflib import get_close_matches -from email import message_from_string -from copy import copy - -from babel import __version__ as VERSION -from babel.core import Locale -from babel.dates import format_datetime -from babel.messages.plurals import get_plural -from babel.util import odict, distinct, LOCALTZ, FixedOffsetTimezone -from babel._compat import string_types, number_types, PY2, cmp - -__all__ = ['Message', 'Catalog', 'TranslationError'] - - -PYTHON_FORMAT = re.compile(r'''(?x) - \% - (?:\(([\w]*)\))? - ( - [-#0\ +]?(?:\*|[\d]+)? - (?:\.(?:\*|[\d]+))? - [hlL]? 
- ) - ([diouxXeEfFgGcrs%]) -''') - - -class Message(object): - """Representation of a single message in a catalog.""" - - def __init__(self, id, string=u'', locations=(), flags=(), auto_comments=(), - user_comments=(), previous_id=(), lineno=None, context=None): - """Create the message object. - - :param id: the message ID, or a ``(singular, plural)`` tuple for - pluralizable messages - :param string: the translated message string, or a - ``(singular, plural)`` tuple for pluralizable messages - :param locations: a sequence of ``(filenname, lineno)`` tuples - :param flags: a set or sequence of flags - :param auto_comments: a sequence of automatic comments for the message - :param user_comments: a sequence of user comments for the message - :param previous_id: the previous message ID, or a ``(singular, plural)`` - tuple for pluralizable messages - :param lineno: the line number on which the msgid line was found in the - PO file, if any - :param context: the message context - """ - self.id = id #: The message ID - if not string and self.pluralizable: - string = (u'', u'') - self.string = string #: The message translation - self.locations = list(distinct(locations)) - self.flags = set(flags) - if id and self.python_format: - self.flags.add('python-format') - else: - self.flags.discard('python-format') - self.auto_comments = list(distinct(auto_comments)) - self.user_comments = list(distinct(user_comments)) - if isinstance(previous_id, string_types): - self.previous_id = [previous_id] - else: - self.previous_id = list(previous_id) - self.lineno = lineno - self.context = context - - def __repr__(self): - return '<%s %r (flags: %r)>' % (type(self).__name__, self.id, - list(self.flags)) - - def __cmp__(self, obj): - """Compare Messages, taking into account plural ids""" - def values_to_compare(): - if isinstance(obj, Message): - plural = self.pluralizable - obj_plural = obj.pluralizable - if plural and obj_plural: - return self.id[0], obj.id[0] - elif plural: - return self.id[0], obj.id - elif obj_plural: - return self.id, obj.id[0] - return self.id, obj.id - this, other = values_to_compare() - return cmp(this, other) - - def __gt__(self, other): - return self.__cmp__(other) > 0 - - def __lt__(self, other): - return self.__cmp__(other) < 0 - - def __ge__(self, other): - return self.__cmp__(other) >= 0 - - def __le__(self, other): - return self.__cmp__(other) <= 0 - - def __eq__(self, other): - return self.__cmp__(other) == 0 - - def __ne__(self, other): - return self.__cmp__(other) != 0 - - def clone(self): - return Message(*map(copy, (self.id, self.string, self.locations, - self.flags, self.auto_comments, - self.user_comments, self.previous_id, - self.lineno, self.context))) - - def check(self, catalog=None): - """Run various validation checks on the message. Some validations - are only performed if the catalog is provided. This method returns - a sequence of `TranslationError` objects. - - :rtype: ``iterator`` - :param catalog: A catalog instance that is passed to the checkers - :see: `Catalog.check` for a way to perform checks for all messages - in a catalog. - """ - from babel.messages.checkers import checkers - errors = [] - for checker in checkers: - try: - checker(catalog, self) - except TranslationError as e: - errors.append(e) - return errors - - @property - def fuzzy(self): - """Whether the translation is fuzzy. 
- - >>> Message('foo').fuzzy - False - >>> msg = Message('foo', 'foo', flags=['fuzzy']) - >>> msg.fuzzy - True - >>> msg - - - :type: `bool`""" - return 'fuzzy' in self.flags - - @property - def pluralizable(self): - """Whether the message is plurizable. - - >>> Message('foo').pluralizable - False - >>> Message(('foo', 'bar')).pluralizable - True - - :type: `bool`""" - return isinstance(self.id, (list, tuple)) - - @property - def python_format(self): - """Whether the message contains Python-style parameters. - - >>> Message('foo %(name)s bar').python_format - True - >>> Message(('foo %(name)s', 'foo %(name)s')).python_format - True - - :type: `bool`""" - ids = self.id - if not isinstance(ids, (list, tuple)): - ids = [ids] - return any(PYTHON_FORMAT.search(id) for id in ids) - - -class TranslationError(Exception): - """Exception thrown by translation checkers when invalid message - translations are encountered.""" - - -DEFAULT_HEADER = u"""\ -# Translations template for PROJECT. -# Copyright (C) YEAR ORGANIZATION -# This file is distributed under the same license as the PROJECT project. -# FIRST AUTHOR , YEAR. -#""" - - -if PY2: - def _parse_header(header_string): - # message_from_string only works for str, not for unicode - headers = message_from_string(header_string.encode('utf8')) - decoded_headers = {} - for name, value in headers.items(): - name = name.decode('utf8') - value = value.decode('utf8') - decoded_headers[name] = value - return decoded_headers - -else: - _parse_header = message_from_string - - -class Catalog(object): - """Representation of a message catalog.""" - - def __init__(self, locale=None, domain=None, header_comment=DEFAULT_HEADER, - project=None, version=None, copyright_holder=None, - msgid_bugs_address=None, creation_date=None, - revision_date=None, last_translator=None, language_team=None, - charset=None, fuzzy=True): - """Initialize the catalog object. 
- - :param locale: the locale identifier or `Locale` object, or `None` - if the catalog is not bound to a locale (which basically - means it's a template) - :param domain: the message domain - :param header_comment: the header comment as string, or `None` for the - default header - :param project: the project's name - :param version: the project's version - :param copyright_holder: the copyright holder of the catalog - :param msgid_bugs_address: the email address or URL to submit bug - reports to - :param creation_date: the date the catalog was created - :param revision_date: the date the catalog was revised - :param last_translator: the name and email of the last translator - :param language_team: the name and email of the language team - :param charset: the encoding to use in the output (defaults to utf-8) - :param fuzzy: the fuzzy bit on the catalog header - """ - self.domain = domain #: The message domain - if locale: - locale = Locale.parse(locale) - self.locale = locale #: The locale or `None` - self._header_comment = header_comment - self._messages = odict() - - self.project = project or 'PROJECT' #: The project name - self.version = version or 'VERSION' #: The project version - self.copyright_holder = copyright_holder or 'ORGANIZATION' - self.msgid_bugs_address = msgid_bugs_address or 'EMAIL@ADDRESS' - - self.last_translator = last_translator or 'FULL NAME ' - """Name and email address of the last translator.""" - self.language_team = language_team or 'LANGUAGE ' - """Name and email address of the language team.""" - - self.charset = charset or 'utf-8' - - if creation_date is None: - creation_date = datetime.now(LOCALTZ) - elif isinstance(creation_date, datetime) and not creation_date.tzinfo: - creation_date = creation_date.replace(tzinfo=LOCALTZ) - self.creation_date = creation_date #: Creation date of the template - if revision_date is None: - revision_date = 'YEAR-MO-DA HO:MI+ZONE' - elif isinstance(revision_date, datetime) and not revision_date.tzinfo: - revision_date = revision_date.replace(tzinfo=LOCALTZ) - self.revision_date = revision_date #: Last revision date of the catalog - self.fuzzy = fuzzy #: Catalog header fuzzy bit (`True` or `False`) - - self.obsolete = odict() #: Dictionary of obsolete messages - self._num_plurals = None - self._plural_expr = None - - def _get_header_comment(self): - comment = self._header_comment - year = datetime.now(LOCALTZ).strftime('%Y') - if hasattr(self.revision_date, 'strftime'): - year = self.revision_date.strftime('%Y') - comment = comment.replace('PROJECT', self.project) \ - .replace('VERSION', self.version) \ - .replace('YEAR', year) \ - .replace('ORGANIZATION', self.copyright_holder) - if self.locale: - comment = comment.replace('Translations template', '%s translations' - % self.locale.english_name) - return comment - - def _set_header_comment(self, string): - self._header_comment = string - - header_comment = property(_get_header_comment, _set_header_comment, doc="""\ - The header comment for the catalog. - - >>> catalog = Catalog(project='Foobar', version='1.0', - ... copyright_holder='Foo Company') - >>> print catalog.header_comment #doctest: +ELLIPSIS - # Translations template for Foobar. - # Copyright (C) ... Foo Company - # This file is distributed under the same license as the Foobar project. - # FIRST AUTHOR , .... - # - - The header can also be set from a string. Any known upper-case variables - will be replaced when the header is retrieved again: - - >>> catalog = Catalog(project='Foobar', version='1.0', - ... 
copyright_holder='Foo Company') - >>> catalog.header_comment = '''\\ - ... # The POT for my really cool PROJECT project. - ... # Copyright (C) 1990-2003 ORGANIZATION - ... # This file is distributed under the same license as the PROJECT - ... # project. - ... #''' - >>> print catalog.header_comment - # The POT for my really cool Foobar project. - # Copyright (C) 1990-2003 Foo Company - # This file is distributed under the same license as the Foobar - # project. - # - - :type: `unicode` - """) - - def _get_mime_headers(self): - headers = [] - headers.append(('Project-Id-Version', - '%s %s' % (self.project, self.version))) - headers.append(('Report-Msgid-Bugs-To', self.msgid_bugs_address)) - headers.append(('POT-Creation-Date', - format_datetime(self.creation_date, 'yyyy-MM-dd HH:mmZ', - locale='en'))) - if isinstance(self.revision_date, (datetime, time_) + number_types): - headers.append(('PO-Revision-Date', - format_datetime(self.revision_date, - 'yyyy-MM-dd HH:mmZ', locale='en'))) - else: - headers.append(('PO-Revision-Date', self.revision_date)) - headers.append(('Last-Translator', self.last_translator)) - if (self.locale is not None) and ('LANGUAGE' in self.language_team): - headers.append(('Language-Team', - self.language_team.replace('LANGUAGE', - str(self.locale)))) - else: - headers.append(('Language-Team', self.language_team)) - if self.locale is not None: - headers.append(('Plural-Forms', self.plural_forms)) - headers.append(('MIME-Version', '1.0')) - headers.append(('Content-Type', - 'text/plain; charset=%s' % self.charset)) - headers.append(('Content-Transfer-Encoding', '8bit')) - headers.append(('Generated-By', 'Babel %s\n' % VERSION)) - return headers - - def _set_mime_headers(self, headers): - for name, value in headers: - name = name.lower() - if name == 'project-id-version': - parts = value.split(' ') - self.project = u' '.join(parts[:-1]) - self.version = parts[-1] - elif name == 'report-msgid-bugs-to': - self.msgid_bugs_address = value - elif name == 'last-translator': - self.last_translator = value - elif name == 'language-team': - self.language_team = value - elif name == 'content-type': - mimetype, params = parse_header(value) - if 'charset' in params: - self.charset = params['charset'].lower() - elif name == 'plural-forms': - _, params = parse_header(' ;' + value) - self._num_plurals = int(params.get('nplurals', 2)) - self._plural_expr = params.get('plural', '(n != 1)') - elif name == 'pot-creation-date': - # FIXME: this should use dates.parse_datetime as soon as that - # is ready - value, tzoffset, _ = re.split('([+-]\d{4})$', value, 1) - - tt = time.strptime(value, '%Y-%m-%d %H:%M') - ts = time.mktime(tt) - - # Separate the offset into a sign component, hours, and minutes - plus_minus_s, rest = tzoffset[0], tzoffset[1:] - hours_offset_s, mins_offset_s = rest[:2], rest[2:] - - # Make them all integers - plus_minus = int(plus_minus_s + '1') - hours_offset = int(hours_offset_s) - mins_offset = int(mins_offset_s) - - # Calculate net offset - net_mins_offset = hours_offset * 60 - net_mins_offset += mins_offset - net_mins_offset *= plus_minus - - # Create an offset object - tzoffset = FixedOffsetTimezone(net_mins_offset) - - # Store the offset in a datetime object - dt = datetime.fromtimestamp(ts) - self.creation_date = dt.replace(tzinfo=tzoffset) - elif name == 'po-revision-date': - # Keep the value if it's not the default one - if 'YEAR' not in value: - # FIXME: this should use dates.parse_datetime as soon as - # that is ready - value, tzoffset, _ = 
re.split('([+-]\d{4})$', value, 1) - tt = time.strptime(value, '%Y-%m-%d %H:%M') - ts = time.mktime(tt) - - # Separate the offset into a sign component, hours, and - # minutes - plus_minus_s, rest = tzoffset[0], tzoffset[1:] - hours_offset_s, mins_offset_s = rest[:2], rest[2:] - - # Make them all integers - plus_minus = int(plus_minus_s + '1') - hours_offset = int(hours_offset_s) - mins_offset = int(mins_offset_s) - - # Calculate net offset - net_mins_offset = hours_offset * 60 - net_mins_offset += mins_offset - net_mins_offset *= plus_minus - - # Create an offset object - tzoffset = FixedOffsetTimezone(net_mins_offset) - - # Store the offset in a datetime object - dt = datetime.fromtimestamp(ts) - self.revision_date = dt.replace(tzinfo=tzoffset) - - mime_headers = property(_get_mime_headers, _set_mime_headers, doc="""\ - The MIME headers of the catalog, used for the special ``msgid ""`` entry. - - The behavior of this property changes slightly depending on whether a locale - is set or not, the latter indicating that the catalog is actually a template - for actual translations. - - Here's an example of the output for such a catalog template: - - >>> from babel.dates import UTC - >>> created = datetime(1990, 4, 1, 15, 30, tzinfo=UTC) - >>> catalog = Catalog(project='Foobar', version='1.0', - ... creation_date=created) - >>> for name, value in catalog.mime_headers: - ... print '%s: %s' % (name, value) - Project-Id-Version: Foobar 1.0 - Report-Msgid-Bugs-To: EMAIL@ADDRESS - POT-Creation-Date: 1990-04-01 15:30+0000 - PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE - Last-Translator: FULL NAME - Language-Team: LANGUAGE - MIME-Version: 1.0 - Content-Type: text/plain; charset=utf-8 - Content-Transfer-Encoding: 8bit - Generated-By: Babel ... - - And here's an example of the output when the locale is set: - - >>> revised = datetime(1990, 8, 3, 12, 0, tzinfo=UTC) - >>> catalog = Catalog(locale='de_DE', project='Foobar', version='1.0', - ... creation_date=created, revision_date=revised, - ... last_translator='John Doe ', - ... language_team='de_DE ') - >>> for name, value in catalog.mime_headers: - ... print '%s: %s' % (name, value) - Project-Id-Version: Foobar 1.0 - Report-Msgid-Bugs-To: EMAIL@ADDRESS - POT-Creation-Date: 1990-04-01 15:30+0000 - PO-Revision-Date: 1990-08-03 12:00+0000 - Last-Translator: John Doe - Language-Team: de_DE - Plural-Forms: nplurals=2; plural=(n != 1) - MIME-Version: 1.0 - Content-Type: text/plain; charset=utf-8 - Content-Transfer-Encoding: 8bit - Generated-By: Babel ... - - :type: `list` - """) - - @property - def num_plurals(self): - """The number of plurals used by the catalog or locale. - - >>> Catalog(locale='en').num_plurals - 2 - >>> Catalog(locale='ga').num_plurals - 3 - - :type: `int`""" - if self._num_plurals is None: - num = 2 - if self.locale: - num = get_plural(self.locale)[0] - self._num_plurals = num - return self._num_plurals - - @property - def plural_expr(self): - """The plural expression used by the catalog or locale. - - >>> Catalog(locale='en').plural_expr - '(n != 1)' - >>> Catalog(locale='ga').plural_expr - '(n==1 ? 0 : n==2 ? 1 : 2)' - - :type: `string_types`""" - if self._plural_expr is None: - expr = '(n != 1)' - if self.locale: - expr = get_plural(self.locale)[1] - self._plural_expr = expr - return self._plural_expr - - @property - def plural_forms(self): - """Return the plural forms declaration for the locale. 
- - >>> Catalog(locale='en').plural_forms - 'nplurals=2; plural=(n != 1)' - >>> Catalog(locale='pt_BR').plural_forms - 'nplurals=2; plural=(n > 1)' - - :type: `str`""" - return 'nplurals=%s; plural=%s' % (self.num_plurals, self.plural_expr) - - def __contains__(self, id): - """Return whether the catalog has a message with the specified ID.""" - return self._key_for(id) in self._messages - - def __len__(self): - """The number of messages in the catalog. - - This does not include the special ``msgid ""`` entry.""" - return len(self._messages) - - def __iter__(self): - """Iterates through all the entries in the catalog, in the order they - were added, yielding a `Message` object for every entry. - - :rtype: ``iterator``""" - buf = [] - for name, value in self.mime_headers: - buf.append('%s: %s' % (name, value)) - flags = set() - if self.fuzzy: - flags |= set(['fuzzy']) - yield Message(u'', '\n'.join(buf), flags=flags) - for key in self._messages: - yield self._messages[key] - - def __repr__(self): - locale = '' - if self.locale: - locale = ' %s' % self.locale - return '<%s %r%s>' % (type(self).__name__, self.domain, locale) - - def __delitem__(self, id): - """Delete the message with the specified ID.""" - self.delete(id) - - def __getitem__(self, id): - """Return the message with the specified ID. - - :param id: the message ID - """ - return self.get(id) - - def __setitem__(self, id, message): - """Add or update the message with the specified ID. - - >>> catalog = Catalog() - >>> catalog[u'foo'] = Message(u'foo') - >>> catalog[u'foo'] - - - If a message with that ID is already in the catalog, it is updated - to include the locations and flags of the new message. - - >>> catalog = Catalog() - >>> catalog[u'foo'] = Message(u'foo', locations=[('main.py', 1)]) - >>> catalog[u'foo'].locations - [('main.py', 1)] - >>> catalog[u'foo'] = Message(u'foo', locations=[('utils.py', 5)]) - >>> catalog[u'foo'].locations - [('main.py', 1), ('utils.py', 5)] - - :param id: the message ID - :param message: the `Message` object - """ - assert isinstance(message, Message), 'expected a Message object' - key = self._key_for(id, message.context) - current = self._messages.get(key) - if current: - if message.pluralizable and not current.pluralizable: - # The new message adds pluralization - current.id = message.id - current.string = message.string - current.locations = list(distinct(current.locations + - message.locations)) - current.auto_comments = list(distinct(current.auto_comments + - message.auto_comments)) - current.user_comments = list(distinct(current.user_comments + - message.user_comments)) - current.flags |= message.flags - message = current - elif id == '': - # special treatment for the header message - self.mime_headers = _parse_header(message.string).items() - self.header_comment = '\n'.join([('# %s' % c).rstrip() for c - in message.user_comments]) - self.fuzzy = message.fuzzy - else: - if isinstance(id, (list, tuple)): - assert isinstance(message.string, (list, tuple)), \ - 'Expected sequence but got %s' % type(message.string) - self._messages[key] = message - - def add(self, id, string=None, locations=(), flags=(), auto_comments=(), - user_comments=(), previous_id=(), lineno=None, context=None): - """Add or update the message with the specified ID. - - >>> catalog = Catalog() - >>> catalog.add(u'foo') - - >>> catalog[u'foo'] - - - This method simply constructs a `Message` object with the given - arguments and invokes `__setitem__` with that object. 
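A minimal usage sketch of the Catalog API documented above (illustrative only: it assumes Babel >= 1.3 installed from the new requirements.txt, and the message strings and source locations are invented):

    from babel.messages.catalog import Catalog

    catalog = Catalog(locale='de_DE', project='Calibre Web', version='1.0')

    # add() wraps its arguments in a Message and stores it under the singular id
    catalog.add(u'Hello, world!', u'Hallo, Welt!', locations=[('cps/web.py', 42)])

    # pluralizable entries use (singular, plural) tuples for both id and string
    catalog.add((u'%d book', u'%d books'), (u'%d Buch', u'%d Buecher'),
                locations=[('cps/web.py', 99)])

    print(catalog.plural_forms)         # 'nplurals=2; plural=(n != 1)' for de_DE
    print(len(catalog))                 # 2 -- the msgid "" header entry is not counted
    print(u'Hello, world!' in catalog)  # True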
- - :param id: the message ID, or a ``(singular, plural)`` tuple for - pluralizable messages - :param string: the translated message string, or a - ``(singular, plural)`` tuple for pluralizable messages - :param locations: a sequence of ``(filenname, lineno)`` tuples - :param flags: a set or sequence of flags - :param auto_comments: a sequence of automatic comments - :param user_comments: a sequence of user comments - :param previous_id: the previous message ID, or a ``(singular, plural)`` - tuple for pluralizable messages - :param lineno: the line number on which the msgid line was found in the - PO file, if any - :param context: the message context - """ - message = Message(id, string, list(locations), flags, auto_comments, - user_comments, previous_id, lineno=lineno, - context=context) - self[id] = message - return message - - def check(self): - """Run various validation checks on the translations in the catalog. - - For every message which fails validation, this method yield a - ``(message, errors)`` tuple, where ``message`` is the `Message` object - and ``errors`` is a sequence of `TranslationError` objects. - - :rtype: ``iterator`` - """ - for message in self._messages.values(): - errors = message.check(catalog=self) - if errors: - yield message, errors - - def get(self, id, context=None): - """Return the message with the specified ID and context. - - :param id: the message ID - :param context: the message context, or ``None`` for no context - """ - return self._messages.get(self._key_for(id, context)) - - def delete(self, id, context=None): - """Delete the message with the specified ID and context. - - :param id: the message ID - :param context: the message context, or ``None`` for no context - """ - key = self._key_for(id, context) - if key in self._messages: - del self._messages[key] - - def update(self, template, no_fuzzy_matching=False): - """Update the catalog based on the given template catalog. - - >>> from babel.messages import Catalog - >>> template = Catalog() - >>> template.add('green', locations=[('main.py', 99)]) - - >>> template.add('blue', locations=[('main.py', 100)]) - - >>> template.add(('salad', 'salads'), locations=[('util.py', 42)]) - - >>> catalog = Catalog(locale='de_DE') - >>> catalog.add('blue', u'blau', locations=[('main.py', 98)]) - - >>> catalog.add('head', u'Kopf', locations=[('util.py', 33)]) - - >>> catalog.add(('salad', 'salads'), (u'Salat', u'Salate'), - ... 
locations=[('util.py', 38)]) - - - >>> catalog.update(template) - >>> len(catalog) - 3 - - >>> msg1 = catalog['green'] - >>> msg1.string - >>> msg1.locations - [('main.py', 99)] - - >>> msg2 = catalog['blue'] - >>> msg2.string - u'blau' - >>> msg2.locations - [('main.py', 100)] - - >>> msg3 = catalog['salad'] - >>> msg3.string - (u'Salat', u'Salate') - >>> msg3.locations - [('util.py', 42)] - - Messages that are in the catalog but not in the template are removed - from the main collection, but can still be accessed via the `obsolete` - member: - - >>> 'head' in catalog - False - >>> catalog.obsolete.values() - [] - - :param template: the reference catalog, usually read from a POT file - :param no_fuzzy_matching: whether to use fuzzy matching of message IDs - """ - messages = self._messages - remaining = messages.copy() - self._messages = odict() - - # Prepare for fuzzy matching - fuzzy_candidates = [] - if not no_fuzzy_matching: - fuzzy_candidates = dict([ - (self._key_for(msgid), messages[msgid].context) - for msgid in messages if msgid and messages[msgid].string - ]) - fuzzy_matches = set() - - def _merge(message, oldkey, newkey): - message = message.clone() - fuzzy = False - if oldkey != newkey: - fuzzy = True - fuzzy_matches.add(oldkey) - oldmsg = messages.get(oldkey) - if isinstance(oldmsg.id, string_types): - message.previous_id = [oldmsg.id] - else: - message.previous_id = list(oldmsg.id) - else: - oldmsg = remaining.pop(oldkey, None) - message.string = oldmsg.string - if isinstance(message.id, (list, tuple)): - if not isinstance(message.string, (list, tuple)): - fuzzy = True - message.string = tuple( - [message.string] + ([u''] * (len(message.id) - 1)) - ) - elif len(message.string) != self.num_plurals: - fuzzy = True - message.string = tuple(message.string[:len(oldmsg.string)]) - elif isinstance(message.string, (list, tuple)): - fuzzy = True - message.string = message.string[0] - message.flags |= oldmsg.flags - if fuzzy: - message.flags |= set([u'fuzzy']) - self[message.id] = message - - for message in template: - if message.id: - key = self._key_for(message.id, message.context) - if key in messages: - _merge(message, key, key) - else: - if no_fuzzy_matching is False: - # do some fuzzy matching with difflib - if isinstance(key, tuple): - matchkey = key[0] # just the msgid, no context - else: - matchkey = key - matches = get_close_matches(matchkey.lower().strip(), - fuzzy_candidates.keys(), 1) - if matches: - newkey = matches[0] - newctxt = fuzzy_candidates[newkey] - if newctxt is not None: - newkey = newkey, newctxt - _merge(message, newkey, key) - continue - - self[message.id] = message - - for msgid in remaining: - if no_fuzzy_matching or msgid not in fuzzy_matches: - self.obsolete[msgid] = remaining[msgid] - # Make updated catalog's POT-Creation-Date equal to the template - # used to update the catalog - self.creation_date = template.creation_date - - def _key_for(self, id, context=None): - """The key for a message is just the singular ID even for pluralizable - messages, but is a ``(msgid, msgctxt)`` tuple for context-specific - messages. 
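A sketch of the update() workflow described above, wired together with the read_po/write_po helpers that frontend.py (further down in this diff) imports from babel.messages.pofile; the file paths are hypothetical:

    from babel.messages.pofile import read_po, write_po

    # a freshly extracted template (POT) and an existing translation catalog
    with open('messages.pot', 'r') as f:
        template = read_po(f)
    with open('translations/de/LC_MESSAGES/messages.po', 'r') as f:
        catalog = read_po(f, locale='de')

    # keep existing translations, fuzzy-match renamed msgids, and move entries
    # that disappeared from the template into catalog.obsolete
    catalog.update(template, no_fuzzy_matching=False)

    with open('translations/de/LC_MESSAGES/messages.po', 'wb') as f:
        write_po(f, catalog)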
- """ - key = id - if isinstance(key, (list, tuple)): - key = id[0] - if context is not None: - key = (key, context) - return key diff --git a/vendor/babel/messages/checkers.py b/vendor/babel/messages/checkers.py deleted file mode 100644 index 24ecdcfe..00000000 --- a/vendor/babel/messages/checkers.py +++ /dev/null @@ -1,173 +0,0 @@ -# -*- coding: utf-8 -*- -""" - babel.messages.checkers - ~~~~~~~~~~~~~~~~~~~~~~~ - - Various routines that help with validation of translations. - - :since: version 0.9 - - :copyright: (c) 2013 by the Babel Team. - :license: BSD, see LICENSE for more details. -""" - -from babel.messages.catalog import TranslationError, PYTHON_FORMAT -from babel._compat import string_types, izip - - -#: list of format chars that are compatible to each other -_string_format_compatibilities = [ - set(['i', 'd', 'u']), - set(['x', 'X']), - set(['f', 'F', 'g', 'G']) -] - - -def num_plurals(catalog, message): - """Verify the number of plurals in the translation.""" - if not message.pluralizable: - if not isinstance(message.string, string_types): - raise TranslationError("Found plural forms for non-pluralizable " - "message") - return - - # skip further tests if no catalog is provided. - elif catalog is None: - return - - msgstrs = message.string - if not isinstance(msgstrs, (list, tuple)): - msgstrs = (msgstrs,) - if len(msgstrs) != catalog.num_plurals: - raise TranslationError("Wrong number of plural forms (expected %d)" % - catalog.num_plurals) - - -def python_format(catalog, message): - """Verify the format string placeholders in the translation.""" - if 'python-format' not in message.flags: - return - msgids = message.id - if not isinstance(msgids, (list, tuple)): - msgids = (msgids,) - msgstrs = message.string - if not isinstance(msgstrs, (list, tuple)): - msgstrs = (msgstrs,) - - for msgid, msgstr in izip(msgids, msgstrs): - if msgstr: - _validate_format(msgid, msgstr) - - -def _validate_format(format, alternative): - """Test format string `alternative` against `format`. `format` can be the - msgid of a message and `alternative` one of the `msgstr`\s. The two - arguments are not interchangeable as `alternative` may contain less - placeholders if `format` uses named placeholders. - - The behavior of this function is undefined if the string does not use - string formattings. - - If the string formatting of `alternative` is compatible to `format` the - function returns `None`, otherwise a `TranslationError` is raised. - - Examples for compatible format strings: - - >>> _validate_format('Hello %s!', 'Hallo %s!') - >>> _validate_format('Hello %i!', 'Hallo %d!') - - Example for an incompatible format strings: - - >>> _validate_format('Hello %(name)s!', 'Hallo %s!') - Traceback (most recent call last): - ... - TranslationError: the format strings are of different kinds - - This function is used by the `python_format` checker. 
- - :param format: The original format string - :param alternative: The alternative format string that should be checked - against format - :raises TranslationError: on formatting errors - """ - - def _parse(string): - result = [] - for match in PYTHON_FORMAT.finditer(string): - name, format, typechar = match.groups() - if typechar == '%' and name is None: - continue - result.append((name, str(typechar))) - return result - - def _compatible(a, b): - if a == b: - return True - for set in _string_format_compatibilities: - if a in set and b in set: - return True - return False - - def _check_positional(results): - positional = None - for name, char in results: - if positional is None: - positional = name is None - else: - if (name is None) != positional: - raise TranslationError('format string mixes positional ' - 'and named placeholders') - return bool(positional) - - a, b = map(_parse, (format, alternative)) - - # now check if both strings are positional or named - a_positional, b_positional = map(_check_positional, (a, b)) - if a_positional and not b_positional and not b: - raise TranslationError('placeholders are incompatible') - elif a_positional != b_positional: - raise TranslationError('the format strings are of different kinds') - - # if we are operating on positional strings both must have the - # same number of format chars and those must be compatible - if a_positional: - if len(a) != len(b): - raise TranslationError('positional format placeholders are ' - 'unbalanced') - for idx, ((_, first), (_, second)) in enumerate(izip(a, b)): - if not _compatible(first, second): - raise TranslationError('incompatible format for placeholder ' - '%d: %r and %r are not compatible' % - (idx + 1, first, second)) - - # otherwise the second string must not have names the first one - # doesn't have and the types of those included must be compatible - else: - type_map = dict(a) - for name, typechar in b: - if name not in type_map: - raise TranslationError('unknown named placeholder %r' % name) - elif not _compatible(typechar, type_map[name]): - raise TranslationError('incompatible format for ' - 'placeholder %r: ' - '%r and %r are not compatible' % - (name, typechar, type_map[name])) - - -def _find_checkers(): - checkers = [] - try: - from pkg_resources import working_set - except ImportError: - pass - else: - for entry_point in working_set.iter_entry_points('babel.checkers'): - checkers.append(entry_point.load()) - if len(checkers) == 0: - # if pkg_resources is not available or no usable egg-info was found - # (see #230), just resort to hard-coded checkers - return [num_plurals, python_format] - return checkers - - -checkers = _find_checkers() diff --git a/vendor/babel/messages/extract.py b/vendor/babel/messages/extract.py deleted file mode 100644 index 2f8084af..00000000 --- a/vendor/babel/messages/extract.py +++ /dev/null @@ -1,562 +0,0 @@ -# -*- coding: utf-8 -*- -""" - babel.messages.extract - ~~~~~~~~~~~~~~~~~~~~~~ - - Basic infrastructure for extracting localizable messages from source files. - - This module defines an extensible system for collecting localizable message - strings from a variety of sources. A native extractor for Python source - files is builtin, extractors for other sources can be added using very - simple plugins. - - The main entry points into the extraction functionality are the functions - `extract_from_dir` and `extract_from_file`. - - :copyright: (c) 2013 by the Babel Team. - :license: BSD, see LICENSE for more details. 
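The checkers above are normally driven through Catalog.check(); a small sketch follows (the mismatched placeholder is deliberate so the python-format check has something to report):

    from babel.messages.catalog import Catalog

    catalog = Catalog(locale='de_DE')
    # the translation drops the %s placeholder on purpose
    catalog.add(u'Hello %s!', u'Hallo!', flags=['python-format'])

    for message, errors in catalog.check():
        for error in errors:
            # expected along the lines of: u'Hello %s!': placeholders are incompatible
            print('%r: %s' % (message.id, error))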
-""" - -import os -import sys -from tokenize import generate_tokens, COMMENT, NAME, OP, STRING - -from babel.util import parse_encoding, pathmatch, relpath -from babel._compat import PY2, text_type -from textwrap import dedent - - -GROUP_NAME = 'babel.extractors' - -DEFAULT_KEYWORDS = { - '_': None, - 'gettext': None, - 'ngettext': (1, 2), - 'ugettext': None, - 'ungettext': (1, 2), - 'dgettext': (2,), - 'dngettext': (2, 3), - 'N_': None, - 'pgettext': ((1, 'c'), 2) -} - -DEFAULT_MAPPING = [('**.py', 'python')] - -empty_msgid_warning = ( -'%s: warning: Empty msgid. It is reserved by GNU gettext: gettext("") ' -'returns the header entry with meta information, not the empty string.') - - -def _strip_comment_tags(comments, tags): - """Helper function for `extract` that strips comment tags from strings - in a list of comment lines. This functions operates in-place. - """ - def _strip(line): - for tag in tags: - if line.startswith(tag): - return line[len(tag):].strip() - return line - comments[:] = map(_strip, comments) - - -def extract_from_dir(dirname=None, method_map=DEFAULT_MAPPING, - options_map=None, keywords=DEFAULT_KEYWORDS, - comment_tags=(), callback=None, strip_comment_tags=False): - """Extract messages from any source files found in the given directory. - - This function generates tuples of the form ``(filename, lineno, message, - comments, context)``. - - Which extraction method is used per file is determined by the `method_map` - parameter, which maps extended glob patterns to extraction method names. - For example, the following is the default mapping: - - >>> method_map = [ - ... ('**.py', 'python') - ... ] - - This basically says that files with the filename extension ".py" at any - level inside the directory should be processed by the "python" extraction - method. Files that don't match any of the mapping patterns are ignored. See - the documentation of the `pathmatch` function for details on the pattern - syntax. - - The following extended mapping would also use the "genshi" extraction - method on any file in "templates" subdirectory: - - >>> method_map = [ - ... ('**/templates/**.*', 'genshi'), - ... ('**.py', 'python') - ... ] - - The dictionary provided by the optional `options_map` parameter augments - these mappings. It uses extended glob patterns as keys, and the values are - dictionaries mapping options names to option values (both strings). - - The glob patterns of the `options_map` do not necessarily need to be the - same as those used in the method mapping. For example, while all files in - the ``templates`` folders in an application may be Genshi applications, the - options for those files may differ based on extension: - - >>> options_map = { - ... '**/templates/**.txt': { - ... 'template_class': 'genshi.template:TextTemplate', - ... 'encoding': 'latin-1' - ... }, - ... '**/templates/**.html': { - ... 'include_attrs': '' - ... } - ... } - - :param dirname: the path to the directory to extract messages from. If - not given the current working directory is used. - :param method_map: a list of ``(pattern, method)`` tuples that maps of - extraction method names to extended glob patterns - :param options_map: a dictionary of additional options (optional) - :param keywords: a dictionary mapping keywords (i.e. 
names of functions - that should be recognized as translation functions) to - tuples that specify which of their arguments contain - localizable strings - :param comment_tags: a list of tags of translator comments to search for - and include in the results - :param callback: a function that is called for every file that message are - extracted from, just before the extraction itself is - performed; the function is passed the filename, the name - of the extraction method and and the options dictionary as - positional arguments, in that order - :param strip_comment_tags: a flag that if set to `True` causes all comment - tags to be removed from the collected comments. - :see: `pathmatch` - """ - if dirname is None: - dirname = os.getcwd() - if options_map is None: - options_map = {} - - absname = os.path.abspath(dirname) - for root, dirnames, filenames in os.walk(absname): - for subdir in dirnames: - if subdir.startswith('.') or subdir.startswith('_'): - dirnames.remove(subdir) - dirnames.sort() - filenames.sort() - for filename in filenames: - filename = relpath( - os.path.join(root, filename).replace(os.sep, '/'), - dirname - ) - for pattern, method in method_map: - if pathmatch(pattern, filename): - filepath = os.path.join(absname, filename) - options = {} - for opattern, odict in options_map.items(): - if pathmatch(opattern, filename): - options = odict - if callback: - callback(filename, method, options) - for lineno, message, comments, context in \ - extract_from_file(method, filepath, - keywords=keywords, - comment_tags=comment_tags, - options=options, - strip_comment_tags= - strip_comment_tags): - yield filename, lineno, message, comments, context - break - - -def extract_from_file(method, filename, keywords=DEFAULT_KEYWORDS, - comment_tags=(), options=None, strip_comment_tags=False): - """Extract messages from a specific file. - - This function returns a list of tuples of the form ``(lineno, funcname, - message)``. - - :param filename: the path to the file to extract messages from - :param method: a string specifying the extraction method (.e.g. "python") - :param keywords: a dictionary mapping keywords (i.e. names of functions - that should be recognized as translation functions) to - tuples that specify which of their arguments contain - localizable strings - :param comment_tags: a list of translator tags to search for and include - in the results - :param strip_comment_tags: a flag that if set to `True` causes all comment - tags to be removed from the collected comments. - :param options: a dictionary of additional options (optional) - """ - fileobj = open(filename, 'rb') - try: - return list(extract(method, fileobj, keywords, comment_tags, options, - strip_comment_tags)) - finally: - fileobj.close() - - -def extract(method, fileobj, keywords=DEFAULT_KEYWORDS, comment_tags=(), - options=None, strip_comment_tags=False): - """Extract messages from the given file-like object using the specified - extraction method. - - This function returns tuples of the form ``(lineno, message, comments)``. - - The implementation dispatches the actual extraction to plugins, based on the - value of the ``method`` parameter. - - >>> source = '''# foo module - ... def run(argv): - ... print _('Hello, world!') - ... ''' - - >>> from StringIO import StringIO - >>> for message in extract('python', StringIO(source)): - ... print message - (3, u'Hello, world!', [], None) - - :param method: a string specifying the extraction method (.e.g. 
"python"); - if this is a simple name, the extraction function will be - looked up by entry point; if it is an explicit reference - to a function (of the form ``package.module:funcname`` or - ``package.module.funcname``), the corresponding function - will be imported and used - :param fileobj: the file-like object the messages should be extracted from - :param keywords: a dictionary mapping keywords (i.e. names of functions - that should be recognized as translation functions) to - tuples that specify which of their arguments contain - localizable strings - :param comment_tags: a list of translator tags to search for and include - in the results - :param options: a dictionary of additional options (optional) - :param strip_comment_tags: a flag that if set to `True` causes all comment - tags to be removed from the collected comments. - :raise ValueError: if the extraction method is not registered - """ - func = None - if ':' in method or '.' in method: - if ':' not in method: - lastdot = method.rfind('.') - module, attrname = method[:lastdot], method[lastdot + 1:] - else: - module, attrname = method.split(':', 1) - func = getattr(__import__(module, {}, {}, [attrname]), attrname) - else: - try: - from pkg_resources import working_set - except ImportError: - pass - else: - for entry_point in working_set.iter_entry_points(GROUP_NAME, - method): - func = entry_point.load(require=True) - break - if func is None: - # if pkg_resources is not available or no usable egg-info was found - # (see #230), we resort to looking up the builtin extractors - # directly - builtin = { - 'ignore': extract_nothing, - 'python': extract_python, - 'javascript': extract_javascript - } - func = builtin.get(method) - if func is None: - raise ValueError('Unknown extraction method %r' % method) - - results = func(fileobj, keywords.keys(), comment_tags, - options=options or {}) - - for lineno, funcname, messages, comments in results: - if funcname: - spec = keywords[funcname] or (1,) - else: - spec = (1,) - if not isinstance(messages, (list, tuple)): - messages = [messages] - if not messages: - continue - - # Validate the messages against the keyword's specification - context = None - msgs = [] - invalid = False - # last_index is 1 based like the keyword spec - last_index = len(messages) - for index in spec: - if isinstance(index, tuple): - context = messages[index[0] - 1] - continue - if last_index < index: - # Not enough arguments - invalid = True - break - message = messages[index - 1] - if message is None: - invalid = True - break - msgs.append(message) - if invalid: - continue - - # keyword spec indexes are 1 based, therefore '-1' - if isinstance(spec[0], tuple): - # context-aware *gettext method - first_msg_index = spec[1] - 1 - else: - first_msg_index = spec[0] - 1 - if not messages[first_msg_index]: - # An empty string msgid isn't valid, emit a warning - where = '%s:%i' % (hasattr(fileobj, 'name') and \ - fileobj.name or '(unknown)', lineno) - sys.stderr.write((empty_msgid_warning % where) + '\n') - continue - - messages = tuple(msgs) - if len(messages) == 1: - messages = messages[0] - - if strip_comment_tags: - _strip_comment_tags(comments, comment_tags) - yield lineno, messages, comments, context - - -def extract_nothing(fileobj, keywords, comment_tags, options): - """Pseudo extractor that does not actually extract anything, but simply - returns an empty list. - """ - return [] - - -def extract_python(fileobj, keywords, comment_tags, options): - """Extract messages from Python source code. 
- - It returns an iterator yielding tuples in the following form ``(lineno, - funcname, message, comments)``. - - :param fileobj: the seekable, file-like object the messages should be - extracted from - :param keywords: a list of keywords (i.e. function names) that should be - recognized as translation functions - :param comment_tags: a list of translator tags to search for and include - in the results - :param options: a dictionary of additional options (optional) - :rtype: ``iterator`` - """ - funcname = lineno = message_lineno = None - call_stack = -1 - buf = [] - messages = [] - translator_comments = [] - in_def = in_translator_comments = False - comment_tag = None - - encoding = parse_encoding(fileobj) or options.get('encoding', 'iso-8859-1') - - if PY2: - next_line = fileobj.readline - else: - next_line = lambda: fileobj.readline().decode(encoding) - - tokens = generate_tokens(next_line) - for tok, value, (lineno, _), _, _ in tokens: - if call_stack == -1 and tok == NAME and value in ('def', 'class'): - in_def = True - elif tok == OP and value == '(': - if in_def: - # Avoid false positives for declarations such as: - # def gettext(arg='message'): - in_def = False - continue - if funcname: - message_lineno = lineno - call_stack += 1 - elif in_def and tok == OP and value == ':': - # End of a class definition without parens - in_def = False - continue - elif call_stack == -1 and tok == COMMENT: - # Strip the comment token from the line - if PY2: - value = value.decode(encoding) - value = value[1:].strip() - if in_translator_comments and \ - translator_comments[-1][0] == lineno - 1: - # We're already inside a translator comment, continue appending - translator_comments.append((lineno, value)) - continue - # If execution reaches this point, let's see if comment line - # starts with one of the comment tags - for comment_tag in comment_tags: - if value.startswith(comment_tag): - in_translator_comments = True - translator_comments.append((lineno, value)) - break - elif funcname and call_stack == 0: - if tok == OP and value == ')': - if buf: - messages.append(''.join(buf)) - del buf[:] - else: - messages.append(None) - - if len(messages) > 1: - messages = tuple(messages) - else: - messages = messages[0] - # Comments don't apply unless they immediately preceed the - # message - if translator_comments and \ - translator_comments[-1][0] < message_lineno - 1: - translator_comments = [] - - yield (message_lineno, funcname, messages, - [comment[1] for comment in translator_comments]) - - funcname = lineno = message_lineno = None - call_stack = -1 - messages = [] - translator_comments = [] - in_translator_comments = False - elif tok == STRING: - # Unwrap quotes in a safe manner, maintaining the string's - # encoding - # https://sourceforge.net/tracker/?func=detail&atid=355470& - # aid=617979&group_id=5470 - value = eval('# coding=%s\n%s' % (str(encoding), value), - {'__builtins__':{}}, {}) - if PY2 and not isinstance(value, text_type): - value = value.decode(encoding) - buf.append(value) - elif tok == OP and value == ',': - if buf: - messages.append(''.join(buf)) - del buf[:] - else: - messages.append(None) - if translator_comments: - # We have translator comments, and since we're on a - # comma(,) user is allowed to break into a new line - # Let's increase the last comment's lineno in order - # for the comment to still be a valid one - old_lineno, old_comment = translator_comments.pop() - translator_comments.append((old_lineno+1, old_comment)) - elif call_stack > 0 and tok == OP and value == ')': - 
call_stack -= 1 - elif funcname and call_stack == -1: - funcname = None - elif tok == NAME and value in keywords: - funcname = value - - -def extract_javascript(fileobj, keywords, comment_tags, options): - """Extract messages from JavaScript source code. - - :param fileobj: the seekable, file-like object the messages should be - extracted from - :param keywords: a list of keywords (i.e. function names) that should be - recognized as translation functions - :param comment_tags: a list of translator tags to search for and include - in the results - :param options: a dictionary of additional options (optional) - """ - from babel.messages.jslexer import tokenize, unquote_string - funcname = message_lineno = None - messages = [] - last_argument = None - translator_comments = [] - concatenate_next = False - encoding = options.get('encoding', 'utf-8') - last_token = None - call_stack = -1 - - for token in tokenize(fileobj.read().decode(encoding)): - if token.type == 'operator' and token.value == '(': - if funcname: - message_lineno = token.lineno - call_stack += 1 - - elif call_stack == -1 and token.type == 'linecomment': - value = token.value[2:].strip() - if translator_comments and \ - translator_comments[-1][0] == token.lineno - 1: - translator_comments.append((token.lineno, value)) - continue - - for comment_tag in comment_tags: - if value.startswith(comment_tag): - translator_comments.append((token.lineno, value.strip())) - break - - elif token.type == 'multilinecomment': - # only one multi-line comment may preceed a translation - translator_comments = [] - value = token.value[2:-2].strip() - for comment_tag in comment_tags: - if value.startswith(comment_tag): - lines = value.splitlines() - if lines: - lines[0] = lines[0].strip() - lines[1:] = dedent('\n'.join(lines[1:])).splitlines() - for offset, line in enumerate(lines): - translator_comments.append((token.lineno + offset, - line)) - break - - elif funcname and call_stack == 0: - if token.type == 'operator' and token.value == ')': - if last_argument is not None: - messages.append(last_argument) - if len(messages) > 1: - messages = tuple(messages) - elif messages: - messages = messages[0] - else: - messages = None - - # Comments don't apply unless they immediately precede the - # message - if translator_comments and \ - translator_comments[-1][0] < message_lineno - 1: - translator_comments = [] - - if messages is not None: - yield (message_lineno, funcname, messages, - [comment[1] for comment in translator_comments]) - - funcname = message_lineno = last_argument = None - concatenate_next = False - translator_comments = [] - messages = [] - call_stack = -1 - - elif token.type == 'string': - new_value = unquote_string(token.value) - if concatenate_next: - last_argument = (last_argument or '') + new_value - concatenate_next = False - else: - last_argument = new_value - - elif token.type == 'operator': - if token.value == ',': - if last_argument is not None: - messages.append(last_argument) - last_argument = None - else: - messages.append(None) - concatenate_next = False - elif token.value == '+': - concatenate_next = True - - elif call_stack > 0 and token.type == 'operator' \ - and token.value == ')': - call_stack -= 1 - - elif funcname and call_stack == -1: - funcname = None - - elif call_stack == -1 and token.type == 'name' and \ - token.value in keywords and \ - (last_token is None or last_token.type != 'name' or - last_token.value != 'function'): - funcname = token.value - - last_token = token diff --git 
a/vendor/babel/messages/frontend.py b/vendor/babel/messages/frontend.py deleted file mode 100644 index 144bc98a..00000000 --- a/vendor/babel/messages/frontend.py +++ /dev/null @@ -1,1259 +0,0 @@ -# -*- coding: utf-8 -*- -""" - babel.messages.frontend - ~~~~~~~~~~~~~~~~~~~~~~~ - - Frontends for the message extraction functionality. - - :copyright: (c) 2013 by the Babel Team. - :license: BSD, see LICENSE for more details. -""" - -try: - from ConfigParser import RawConfigParser -except ImportError: - from configparser import RawConfigParser -from datetime import datetime -from distutils import log -from distutils.cmd import Command -from distutils.errors import DistutilsOptionError, DistutilsSetupError -from locale import getpreferredencoding -import logging -from optparse import OptionParser -import os -import re -import shutil -import sys -import tempfile - -from babel import __version__ as VERSION -from babel import Locale, localedata -from babel.core import UnknownLocaleError -from babel.messages.catalog import Catalog -from babel.messages.extract import extract_from_dir, DEFAULT_KEYWORDS, \ - DEFAULT_MAPPING -from babel.messages.mofile import write_mo -from babel.messages.pofile import read_po, write_po -from babel.util import odict, LOCALTZ -from babel._compat import string_types, BytesIO, PY2 - - -class compile_catalog(Command): - """Catalog compilation command for use in ``setup.py`` scripts. - - If correctly installed, this command is available to Setuptools-using - setup scripts automatically. For projects using plain old ``distutils``, - the command needs to be registered explicitly in ``setup.py``:: - - from babel.messages.frontend import compile_catalog - - setup( - ... - cmdclass = {'compile_catalog': compile_catalog} - ) - - .. versionadded:: 0.9 - """ - - description = 'compile message catalogs to binary MO files' - user_options = [ - ('domain=', 'D', - "domain of PO file (default 'messages')"), - ('directory=', 'd', - 'path to base directory containing the catalogs'), - ('input-file=', 'i', - 'name of the input file'), - ('output-file=', 'o', - "name of the output file (default " - "'//LC_MESSAGES/.po')"), - ('locale=', 'l', - 'locale of the catalog to compile'), - ('use-fuzzy', 'f', - 'also include fuzzy translations'), - ('statistics', None, - 'print statistics about translations') - ] - boolean_options = ['use-fuzzy', 'statistics'] - - def initialize_options(self): - self.domain = 'messages' - self.directory = None - self.input_file = None - self.output_file = None - self.locale = None - self.use_fuzzy = False - self.statistics = False - - def finalize_options(self): - if not self.input_file and not self.directory: - raise DistutilsOptionError('you must specify either the input file ' - 'or the base directory') - if not self.output_file and not self.directory: - raise DistutilsOptionError('you must specify either the input file ' - 'or the base directory') - - def run(self): - po_files = [] - mo_files = [] - - if not self.input_file: - if self.locale: - po_files.append((self.locale, - os.path.join(self.directory, self.locale, - 'LC_MESSAGES', - self.domain + '.po'))) - mo_files.append(os.path.join(self.directory, self.locale, - 'LC_MESSAGES', - self.domain + '.mo')) - else: - for locale in os.listdir(self.directory): - po_file = os.path.join(self.directory, locale, - 'LC_MESSAGES', self.domain + '.po') - if os.path.exists(po_file): - po_files.append((locale, po_file)) - mo_files.append(os.path.join(self.directory, locale, - 'LC_MESSAGES', - self.domain + '.mo')) - else: - 
po_files.append((self.locale, self.input_file)) - if self.output_file: - mo_files.append(self.output_file) - else: - mo_files.append(os.path.join(self.directory, self.locale, - 'LC_MESSAGES', - self.domain + '.mo')) - - if not po_files: - raise DistutilsOptionError('no message catalogs found') - - for idx, (locale, po_file) in enumerate(po_files): - mo_file = mo_files[idx] - infile = open(po_file, 'r') - try: - catalog = read_po(infile, locale) - finally: - infile.close() - - if self.statistics: - translated = 0 - for message in list(catalog)[1:]: - if message.string: - translated +=1 - percentage = 0 - if len(catalog): - percentage = translated * 100 // len(catalog) - log.info('%d of %d messages (%d%%) translated in %r', - translated, len(catalog), percentage, po_file) - - if catalog.fuzzy and not self.use_fuzzy: - log.warn('catalog %r is marked as fuzzy, skipping', po_file) - continue - - for message, errors in catalog.check(): - for error in errors: - log.error('error: %s:%d: %s', po_file, message.lineno, - error) - - log.info('compiling catalog %r to %r', po_file, mo_file) - - outfile = open(mo_file, 'wb') - try: - write_mo(outfile, catalog, use_fuzzy=self.use_fuzzy) - finally: - outfile.close() - - -class extract_messages(Command): - """Message extraction command for use in ``setup.py`` scripts. - - If correctly installed, this command is available to Setuptools-using - setup scripts automatically. For projects using plain old ``distutils``, - the command needs to be registered explicitly in ``setup.py``:: - - from babel.messages.frontend import extract_messages - - setup( - ... - cmdclass = {'extract_messages': extract_messages} - ) - """ - - description = 'extract localizable strings from the project code' - user_options = [ - ('charset=', None, - 'charset to use in the output file'), - ('keywords=', 'k', - 'space-separated list of keywords to look for in addition to the ' - 'defaults'), - ('no-default-keywords', None, - 'do not include the default keywords'), - ('mapping-file=', 'F', - 'path to the mapping configuration file'), - ('no-location', None, - 'do not include location comments with filename and line number'), - ('omit-header', None, - 'do not include msgid "" entry in header'), - ('output-file=', 'o', - 'name of the output file'), - ('width=', 'w', - 'set output line width (default 76)'), - ('no-wrap', None, - 'do not break long message lines, longer than the output line width, ' - 'into several lines'), - ('sort-output', None, - 'generate sorted output (default False)'), - ('sort-by-file', None, - 'sort output by file location (default False)'), - ('msgid-bugs-address=', None, - 'set report address for msgid'), - ('copyright-holder=', None, - 'set copyright holder in output'), - ('add-comments=', 'c', - 'place comment block with TAG (or those preceding keyword lines) in ' - 'output file. Separate multiple TAGs with commas(,)'), - ('strip-comments', None, - 'strip the comment TAGs from the comments.'), - ('input-dirs=', None, - 'directories that should be scanned for messages. 
Separate multiple ' - 'directories with commas(,)'), - ] - boolean_options = [ - 'no-default-keywords', 'no-location', 'omit-header', 'no-wrap', - 'sort-output', 'sort-by-file', 'strip-comments' - ] - - def initialize_options(self): - self.charset = 'utf-8' - self.keywords = '' - self._keywords = DEFAULT_KEYWORDS.copy() - self.no_default_keywords = False - self.mapping_file = None - self.no_location = False - self.omit_header = False - self.output_file = None - self.input_dirs = None - self.width = None - self.no_wrap = False - self.sort_output = False - self.sort_by_file = False - self.msgid_bugs_address = None - self.copyright_holder = None - self.add_comments = None - self._add_comments = [] - self.strip_comments = False - - def finalize_options(self): - if self.no_default_keywords and not self.keywords: - raise DistutilsOptionError('you must specify new keywords if you ' - 'disable the default ones') - if self.no_default_keywords: - self._keywords = {} - if self.keywords: - self._keywords.update(parse_keywords(self.keywords.split())) - - if not self.output_file: - raise DistutilsOptionError('no output file specified') - if self.no_wrap and self.width: - raise DistutilsOptionError("'--no-wrap' and '--width' are mutually " - "exclusive") - if not self.no_wrap and not self.width: - self.width = 76 - elif self.width is not None: - self.width = int(self.width) - - if self.sort_output and self.sort_by_file: - raise DistutilsOptionError("'--sort-output' and '--sort-by-file' " - "are mutually exclusive") - - if self.input_dirs: - self.input_dirs = re.split(',\s*', self.input_dirs) - else: - self.input_dirs = dict.fromkeys([k.split('.',1)[0] - for k in self.distribution.packages - ]).keys() - - if self.add_comments: - self._add_comments = self.add_comments.split(',') - - def run(self): - mappings = self._get_mappings() - outfile = open(self.output_file, 'wb') - try: - catalog = Catalog(project=self.distribution.get_name(), - version=self.distribution.get_version(), - msgid_bugs_address=self.msgid_bugs_address, - copyright_holder=self.copyright_holder, - charset=self.charset) - - for dirname, (method_map, options_map) in mappings.items(): - def callback(filename, method, options): - if method == 'ignore': - return - filepath = os.path.normpath(os.path.join(dirname, filename)) - optstr = '' - if options: - optstr = ' (%s)' % ', '.join(['%s="%s"' % (k, v) for - k, v in options.items()]) - log.info('extracting messages from %s%s', filepath, optstr) - - extracted = extract_from_dir(dirname, method_map, options_map, - keywords=self._keywords, - comment_tags=self._add_comments, - callback=callback, - strip_comment_tags= - self.strip_comments) - for filename, lineno, message, comments, context in extracted: - filepath = os.path.normpath(os.path.join(dirname, filename)) - catalog.add(message, None, [(filepath, lineno)], - auto_comments=comments, context=context) - - log.info('writing PO template file to %s' % self.output_file) - write_po(outfile, catalog, width=self.width, - no_location=self.no_location, - omit_header=self.omit_header, - sort_output=self.sort_output, - sort_by_file=self.sort_by_file) - finally: - outfile.close() - - def _get_mappings(self): - mappings = {} - - if self.mapping_file: - fileobj = open(self.mapping_file, 'U') - try: - method_map, options_map = parse_mapping(fileobj) - for dirname in self.input_dirs: - mappings[dirname] = method_map, options_map - finally: - fileobj.close() - - elif getattr(self.distribution, 'message_extractors', None): - message_extractors = 
self.distribution.message_extractors - for dirname, mapping in message_extractors.items(): - if isinstance(mapping, string_types): - method_map, options_map = parse_mapping(BytesIO(mapping)) - else: - method_map, options_map = [], {} - for pattern, method, options in mapping: - method_map.append((pattern, method)) - options_map[pattern] = options or {} - mappings[dirname] = method_map, options_map - - else: - for dirname in self.input_dirs: - mappings[dirname] = DEFAULT_MAPPING, {} - - return mappings - - -def check_message_extractors(dist, name, value): - """Validate the ``message_extractors`` keyword argument to ``setup()``. - - :param dist: the distutils/setuptools ``Distribution`` object - :param name: the name of the keyword argument (should always be - "message_extractors") - :param value: the value of the keyword argument - :raise `DistutilsSetupError`: if the value is not valid - """ - assert name == 'message_extractors' - if not isinstance(value, dict): - raise DistutilsSetupError('the value of the "message_extractors" ' - 'parameter must be a dictionary') - - -class init_catalog(Command): - """New catalog initialization command for use in ``setup.py`` scripts. - - If correctly installed, this command is available to Setuptools-using - setup scripts automatically. For projects using plain old ``distutils``, - the command needs to be registered explicitly in ``setup.py``:: - - from babel.messages.frontend import init_catalog - - setup( - ... - cmdclass = {'init_catalog': init_catalog} - ) - """ - - description = 'create a new catalog based on a POT file' - user_options = [ - ('domain=', 'D', - "domain of PO file (default 'messages')"), - ('input-file=', 'i', - 'name of the input file'), - ('output-dir=', 'd', - 'path to output directory'), - ('output-file=', 'o', - "name of the output file (default " - "'//LC_MESSAGES/.po')"), - ('locale=', 'l', - 'locale for the new localized catalog'), - ('width=', 'w', - 'set output line width (default 76)'), - ('no-wrap', None, - 'do not break long message lines, longer than the output line width, ' - 'into several lines'), - ] - boolean_options = ['no-wrap'] - - def initialize_options(self): - self.output_dir = None - self.output_file = None - self.input_file = None - self.locale = None - self.domain = 'messages' - self.no_wrap = False - self.width = None - - def finalize_options(self): - if not self.input_file: - raise DistutilsOptionError('you must specify the input file') - - if not self.locale: - raise DistutilsOptionError('you must provide a locale for the ' - 'new catalog') - try: - self._locale = Locale.parse(self.locale) - except UnknownLocaleError as e: - raise DistutilsOptionError(e) - - if not self.output_file and not self.output_dir: - raise DistutilsOptionError('you must specify the output directory') - if not self.output_file: - self.output_file = os.path.join(self.output_dir, self.locale, - 'LC_MESSAGES', self.domain + '.po') - - if not os.path.exists(os.path.dirname(self.output_file)): - os.makedirs(os.path.dirname(self.output_file)) - if self.no_wrap and self.width: - raise DistutilsOptionError("'--no-wrap' and '--width' are mutually " - "exclusive") - if not self.no_wrap and not self.width: - self.width = 76 - elif self.width is not None: - self.width = int(self.width) - - def run(self): - log.info('creating catalog %r based on %r', self.output_file, - self.input_file) - - infile = open(self.input_file, 'r') - try: - # Although reading from the catalog template, read_po must be fed - # the locale in order to correctly 
calculate plurals - catalog = read_po(infile, locale=self.locale) - finally: - infile.close() - - catalog.locale = self._locale - catalog.revision_date = datetime.now(LOCALTZ) - catalog.fuzzy = False - - outfile = open(self.output_file, 'wb') - try: - write_po(outfile, catalog, width=self.width) - finally: - outfile.close() - - -class update_catalog(Command): - """Catalog merging command for use in ``setup.py`` scripts. - - If correctly installed, this command is available to Setuptools-using - setup scripts automatically. For projects using plain old ``distutils``, - the command needs to be registered explicitly in ``setup.py``:: - - from babel.messages.frontend import update_catalog - - setup( - ... - cmdclass = {'update_catalog': update_catalog} - ) - - .. versionadded:: 0.9 - """ - - description = 'update message catalogs from a POT file' - user_options = [ - ('domain=', 'D', - "domain of PO file (default 'messages')"), - ('input-file=', 'i', - 'name of the input file'), - ('output-dir=', 'd', - 'path to base directory containing the catalogs'), - ('output-file=', 'o', - "name of the output file (default " - "'//LC_MESSAGES/.po')"), - ('locale=', 'l', - 'locale of the catalog to compile'), - ('width=', 'w', - 'set output line width (default 76)'), - ('no-wrap', None, - 'do not break long message lines, longer than the output line width, ' - 'into several lines'), - ('ignore-obsolete=', None, - 'whether to omit obsolete messages from the output'), - ('no-fuzzy-matching', 'N', - 'do not use fuzzy matching'), - ('previous', None, - 'keep previous msgids of translated messages') - ] - boolean_options = ['ignore_obsolete', 'no_fuzzy_matching', 'previous'] - - def initialize_options(self): - self.domain = 'messages' - self.input_file = None - self.output_dir = None - self.output_file = None - self.locale = None - self.width = None - self.no_wrap = False - self.ignore_obsolete = False - self.no_fuzzy_matching = False - self.previous = False - - def finalize_options(self): - if not self.input_file: - raise DistutilsOptionError('you must specify the input file') - if not self.output_file and not self.output_dir: - raise DistutilsOptionError('you must specify the output file or ' - 'directory') - if self.output_file and not self.locale: - raise DistutilsOptionError('you must specify the locale') - if self.no_wrap and self.width: - raise DistutilsOptionError("'--no-wrap' and '--width' are mutually " - "exclusive") - if not self.no_wrap and not self.width: - self.width = 76 - elif self.width is not None: - self.width = int(self.width) - if self.no_fuzzy_matching and self.previous: - self.previous = False - - def run(self): - po_files = [] - if not self.output_file: - if self.locale: - po_files.append((self.locale, - os.path.join(self.output_dir, self.locale, - 'LC_MESSAGES', - self.domain + '.po'))) - else: - for locale in os.listdir(self.output_dir): - po_file = os.path.join(self.output_dir, locale, - 'LC_MESSAGES', - self.domain + '.po') - if os.path.exists(po_file): - po_files.append((locale, po_file)) - else: - po_files.append((self.locale, self.output_file)) - - domain = self.domain - if not domain: - domain = os.path.splitext(os.path.basename(self.input_file))[0] - - infile = open(self.input_file, 'U') - try: - template = read_po(infile) - finally: - infile.close() - - if not po_files: - raise DistutilsOptionError('no message catalogs found') - - for locale, filename in po_files: - log.info('updating catalog %r based on %r', filename, - self.input_file) - infile = open(filename, 'U') - try: 
- catalog = read_po(infile, locale=locale, domain=domain) - finally: - infile.close() - - catalog.update(template, self.no_fuzzy_matching) - - tmpname = os.path.join(os.path.dirname(filename), - tempfile.gettempprefix() + - os.path.basename(filename)) - tmpfile = open(tmpname, 'w') - try: - try: - write_po(tmpfile, catalog, - ignore_obsolete=self.ignore_obsolete, - include_previous=self.previous, width=self.width) - finally: - tmpfile.close() - except: - os.remove(tmpname) - raise - - try: - os.rename(tmpname, filename) - except OSError: - # We're probably on Windows, which doesn't support atomic - # renames, at least not through Python - # If the error is in fact due to a permissions problem, that - # same error is going to be raised from one of the following - # operations - os.remove(filename) - shutil.copy(tmpname, filename) - os.remove(tmpname) - - -class CommandLineInterface(object): - """Command-line interface. - - This class provides a simple command-line interface to the message - extraction and PO file generation functionality. - """ - - usage = '%%prog %s [options] %s' - version = '%%prog %s' % VERSION - commands = { - 'compile': 'compile message catalogs to MO files', - 'extract': 'extract messages from source files and generate a POT file', - 'init': 'create new message catalogs from a POT file', - 'update': 'update existing message catalogs from a POT file' - } - - def run(self, argv=sys.argv): - """Main entry point of the command-line interface. - - :param argv: list of arguments passed on the command-line - """ - self.parser = OptionParser(usage=self.usage % ('command', '[args]'), - version=self.version) - self.parser.disable_interspersed_args() - self.parser.print_help = self._help - self.parser.add_option('--list-locales', dest='list_locales', - action='store_true', - help="print all known locales and exit") - self.parser.add_option('-v', '--verbose', action='store_const', - dest='loglevel', const=logging.DEBUG, - help='print as much as possible') - self.parser.add_option('-q', '--quiet', action='store_const', - dest='loglevel', const=logging.ERROR, - help='print as little as possible') - self.parser.set_defaults(list_locales=False, loglevel=logging.INFO) - - options, args = self.parser.parse_args(argv[1:]) - - self._configure_logging(options.loglevel) - if options.list_locales: - identifiers = localedata.locale_identifiers() - longest = max([len(identifier) for identifier in identifiers]) - identifiers.sort() - format = u'%%-%ds %%s' % (longest + 1) - for identifier in identifiers: - locale = Locale.parse(identifier) - output = format % (identifier, locale.english_name) - print(output.encode(sys.stdout.encoding or - getpreferredencoding() or - 'ascii', 'replace')) - return 0 - - if not args: - self.parser.error('no valid command or option passed. ' - 'Try the -h/--help option for more information.') - - cmdname = args[0] - if cmdname not in self.commands: - self.parser.error('unknown command "%s"' % cmdname) - - return getattr(self, cmdname)(args[1:]) - - def _configure_logging(self, loglevel): - self.log = logging.getLogger('babel') - self.log.setLevel(loglevel) - # Don't add a new handler for every instance initialization (#227), this - # would cause duplicated output when the CommandLineInterface as an - # normal Python class. 
- if self.log.handlers: - handler = self.log.handlers[0] - else: - handler = logging.StreamHandler() - self.log.addHandler(handler) - handler.setLevel(loglevel) - formatter = logging.Formatter('%(message)s') - handler.setFormatter(formatter) - - def _help(self): - print(self.parser.format_help()) - print("commands:") - longest = max([len(command) for command in self.commands]) - format = " %%-%ds %%s" % max(8, longest + 1) - commands = sorted(self.commands.items()) - for name, description in commands: - print(format % (name, description)) - - def compile(self, argv): - """Subcommand for compiling a message catalog to a MO file. - - :param argv: the command arguments - :since: version 0.9 - """ - parser = OptionParser(usage=self.usage % ('compile', ''), - description=self.commands['compile']) - parser.add_option('--domain', '-D', dest='domain', - help="domain of MO and PO files (default '%default')") - parser.add_option('--directory', '-d', dest='directory', - metavar='DIR', help='base directory of catalog files') - parser.add_option('--locale', '-l', dest='locale', metavar='LOCALE', - help='locale of the catalog') - parser.add_option('--input-file', '-i', dest='input_file', - metavar='FILE', help='name of the input file') - parser.add_option('--output-file', '-o', dest='output_file', - metavar='FILE', - help="name of the output file (default " - "'//LC_MESSAGES/" - ".mo')") - parser.add_option('--use-fuzzy', '-f', dest='use_fuzzy', - action='store_true', - help='also include fuzzy translations (default ' - '%default)') - parser.add_option('--statistics', dest='statistics', - action='store_true', - help='print statistics about translations') - - parser.set_defaults(domain='messages', use_fuzzy=False, - compile_all=False, statistics=False) - options, args = parser.parse_args(argv) - - po_files = [] - mo_files = [] - if not options.input_file: - if not options.directory: - parser.error('you must specify either the input file or the ' - 'base directory') - if options.locale: - po_files.append((options.locale, - os.path.join(options.directory, - options.locale, 'LC_MESSAGES', - options.domain + '.po'))) - mo_files.append(os.path.join(options.directory, options.locale, - 'LC_MESSAGES', - options.domain + '.mo')) - else: - for locale in os.listdir(options.directory): - po_file = os.path.join(options.directory, locale, - 'LC_MESSAGES', options.domain + '.po') - if os.path.exists(po_file): - po_files.append((locale, po_file)) - mo_files.append(os.path.join(options.directory, locale, - 'LC_MESSAGES', - options.domain + '.mo')) - else: - po_files.append((options.locale, options.input_file)) - if options.output_file: - mo_files.append(options.output_file) - else: - if not options.directory: - parser.error('you must specify either the input file or ' - 'the base directory') - mo_files.append(os.path.join(options.directory, options.locale, - 'LC_MESSAGES', - options.domain + '.mo')) - if not po_files: - parser.error('no message catalogs found') - - for idx, (locale, po_file) in enumerate(po_files): - mo_file = mo_files[idx] - infile = open(po_file, 'r') - try: - catalog = read_po(infile, locale) - finally: - infile.close() - - if options.statistics: - translated = 0 - for message in list(catalog)[1:]: - if message.string: - translated +=1 - percentage = 0 - if len(catalog): - percentage = translated * 100 // len(catalog) - self.log.info("%d of %d messages (%d%%) translated in %r", - translated, len(catalog), percentage, po_file) - - if catalog.fuzzy and not options.use_fuzzy: - self.log.warning('catalog 
%r is marked as fuzzy, skipping', - po_file) - continue - - for message, errors in catalog.check(): - for error in errors: - self.log.error('error: %s:%d: %s', po_file, message.lineno, - error) - - self.log.info('compiling catalog %r to %r', po_file, mo_file) - - outfile = open(mo_file, 'wb') - try: - write_mo(outfile, catalog, use_fuzzy=options.use_fuzzy) - finally: - outfile.close() - - def extract(self, argv): - """Subcommand for extracting messages from source files and generating - a POT file. - - :param argv: the command arguments - """ - parser = OptionParser(usage=self.usage % ('extract', 'dir1 ...'), - description=self.commands['extract']) - parser.add_option('--charset', dest='charset', - help='charset to use in the output (default ' - '"%default")') - parser.add_option('-k', '--keyword', dest='keywords', action='append', - help='keywords to look for in addition to the ' - 'defaults. You can specify multiple -k flags on ' - 'the command line.') - parser.add_option('--no-default-keywords', dest='no_default_keywords', - action='store_true', - help="do not include the default keywords") - parser.add_option('--mapping', '-F', dest='mapping_file', - help='path to the extraction mapping file') - parser.add_option('--no-location', dest='no_location', - action='store_true', - help='do not include location comments with filename ' - 'and line number') - parser.add_option('--omit-header', dest='omit_header', - action='store_true', - help='do not include msgid "" entry in header') - parser.add_option('-o', '--output', dest='output', - help='path to the output POT file') - parser.add_option('-w', '--width', dest='width', type='int', - help="set output line width (default 76)") - parser.add_option('--no-wrap', dest='no_wrap', action='store_true', - help='do not break long message lines, longer than ' - 'the output line width, into several lines') - parser.add_option('--sort-output', dest='sort_output', - action='store_true', - help='generate sorted output (default False)') - parser.add_option('--sort-by-file', dest='sort_by_file', - action='store_true', - help='sort output by file location (default False)') - parser.add_option('--msgid-bugs-address', dest='msgid_bugs_address', - metavar='EMAIL@ADDRESS', - help='set report address for msgid') - parser.add_option('--copyright-holder', dest='copyright_holder', - help='set copyright holder in output') - parser.add_option('--project', dest='project', - help='set project name in output') - parser.add_option('--version', dest='version', - help='set project version in output') - parser.add_option('--add-comments', '-c', dest='comment_tags', - metavar='TAG', action='append', - help='place comment block with TAG (or those ' - 'preceding keyword lines) in output file. 
One ' - 'TAG per argument call') - parser.add_option('--strip-comment-tags', '-s', - dest='strip_comment_tags', action='store_true', - help='Strip the comment tags from the comments.') - - parser.set_defaults(charset='utf-8', keywords=[], - no_default_keywords=False, no_location=False, - omit_header = False, width=None, no_wrap=False, - sort_output=False, sort_by_file=False, - comment_tags=[], strip_comment_tags=False) - options, args = parser.parse_args(argv) - if not args: - parser.error('incorrect number of arguments') - - keywords = DEFAULT_KEYWORDS.copy() - if options.no_default_keywords: - if not options.keywords: - parser.error('you must specify new keywords if you disable the ' - 'default ones') - keywords = {} - if options.keywords: - keywords.update(parse_keywords(options.keywords)) - - if options.mapping_file: - fileobj = open(options.mapping_file, 'U') - try: - method_map, options_map = parse_mapping(fileobj) - finally: - fileobj.close() - else: - method_map = DEFAULT_MAPPING - options_map = {} - - if options.width and options.no_wrap: - parser.error("'--no-wrap' and '--width' are mutually exclusive.") - elif not options.width and not options.no_wrap: - options.width = 76 - - if options.sort_output and options.sort_by_file: - parser.error("'--sort-output' and '--sort-by-file' are mutually " - "exclusive") - - catalog = Catalog(project=options.project, - version=options.version, - msgid_bugs_address=options.msgid_bugs_address, - copyright_holder=options.copyright_holder, - charset=options.charset) - - for dirname in args: - if not os.path.isdir(dirname): - parser.error('%r is not a directory' % dirname) - - def callback(filename, method, options): - if method == 'ignore': - return - filepath = os.path.normpath(os.path.join(dirname, filename)) - optstr = '' - if options: - optstr = ' (%s)' % ', '.join(['%s="%s"' % (k, v) for - k, v in options.items()]) - self.log.info('extracting messages from %s%s', filepath, - optstr) - - extracted = extract_from_dir(dirname, method_map, options_map, - keywords, options.comment_tags, - callback=callback, - strip_comment_tags= - options.strip_comment_tags) - for filename, lineno, message, comments, context in extracted: - filepath = os.path.normpath(os.path.join(dirname, filename)) - catalog.add(message, None, [(filepath, lineno)], - auto_comments=comments, context=context) - - catalog_charset = catalog.charset - if options.output not in (None, '-'): - self.log.info('writing PO template file to %s' % options.output) - outfile = open(options.output, 'wb') - close_output = True - else: - outfile = sys.stdout - - # This is a bit of a hack on Python 3. stdout is a text stream so - # we need to find the underlying file when we write the PO. In - # later versions of Babel we want the write_po function to accept - # text or binary streams and automatically adjust the encoding. - if not PY2 and hasattr(outfile, 'buffer'): - catalog.charset = outfile.encoding - outfile = outfile.buffer.raw - - close_output = False - - try: - write_po(outfile, catalog, width=options.width, - no_location=options.no_location, - omit_header=options.omit_header, - sort_output=options.sort_output, - sort_by_file=options.sort_by_file) - finally: - if close_output: - outfile.close() - catalog.charset = catalog_charset - - def init(self, argv): - """Subcommand for creating new message catalogs from a template. 
- - :param argv: the command arguments - """ - parser = OptionParser(usage=self.usage % ('init', ''), - description=self.commands['init']) - parser.add_option('--domain', '-D', dest='domain', - help="domain of PO file (default '%default')") - parser.add_option('--input-file', '-i', dest='input_file', - metavar='FILE', help='name of the input file') - parser.add_option('--output-dir', '-d', dest='output_dir', - metavar='DIR', help='path to output directory') - parser.add_option('--output-file', '-o', dest='output_file', - metavar='FILE', - help="name of the output file (default " - "'//LC_MESSAGES/" - ".po')") - parser.add_option('--locale', '-l', dest='locale', metavar='LOCALE', - help='locale for the new localized catalog') - parser.add_option('-w', '--width', dest='width', type='int', - help="set output line width (default 76)") - parser.add_option('--no-wrap', dest='no_wrap', action='store_true', - help='do not break long message lines, longer than ' - 'the output line width, into several lines') - - parser.set_defaults(domain='messages') - options, args = parser.parse_args(argv) - - if not options.locale: - parser.error('you must provide a locale for the new catalog') - try: - locale = Locale.parse(options.locale) - except UnknownLocaleError as e: - parser.error(e) - - if not options.input_file: - parser.error('you must specify the input file') - - if not options.output_file and not options.output_dir: - parser.error('you must specify the output file or directory') - - if not options.output_file: - options.output_file = os.path.join(options.output_dir, - options.locale, 'LC_MESSAGES', - options.domain + '.po') - if not os.path.exists(os.path.dirname(options.output_file)): - os.makedirs(os.path.dirname(options.output_file)) - if options.width and options.no_wrap: - parser.error("'--no-wrap' and '--width' are mutually exclusive.") - elif not options.width and not options.no_wrap: - options.width = 76 - - infile = open(options.input_file, 'r') - try: - # Although reading from the catalog template, read_po must be fed - # the locale in order to correctly calculate plurals - catalog = read_po(infile, locale=options.locale) - finally: - infile.close() - - catalog.locale = locale - catalog.revision_date = datetime.now(LOCALTZ) - - self.log.info('creating catalog %r based on %r', options.output_file, - options.input_file) - - outfile = open(options.output_file, 'wb') - try: - write_po(outfile, catalog, width=options.width) - finally: - outfile.close() - - def update(self, argv): - """Subcommand for updating existing message catalogs from a template. 
- - :param argv: the command arguments - :since: version 0.9 - """ - parser = OptionParser(usage=self.usage % ('update', ''), - description=self.commands['update']) - parser.add_option('--domain', '-D', dest='domain', - help="domain of PO file (default '%default')") - parser.add_option('--input-file', '-i', dest='input_file', - metavar='FILE', help='name of the input file') - parser.add_option('--output-dir', '-d', dest='output_dir', - metavar='DIR', help='path to output directory') - parser.add_option('--output-file', '-o', dest='output_file', - metavar='FILE', - help="name of the output file (default " - "'//LC_MESSAGES/" - ".po')") - parser.add_option('--locale', '-l', dest='locale', metavar='LOCALE', - help='locale of the translations catalog') - parser.add_option('-w', '--width', dest='width', type='int', - help="set output line width (default 76)") - parser.add_option('--no-wrap', dest='no_wrap', action = 'store_true', - help='do not break long message lines, longer than ' - 'the output line width, into several lines') - parser.add_option('--ignore-obsolete', dest='ignore_obsolete', - action='store_true', - help='do not include obsolete messages in the output ' - '(default %default)') - parser.add_option('--no-fuzzy-matching', '-N', dest='no_fuzzy_matching', - action='store_true', - help='do not use fuzzy matching (default %default)') - parser.add_option('--previous', dest='previous', action='store_true', - help='keep previous msgids of translated messages ' - '(default %default)') - - parser.set_defaults(domain='messages', ignore_obsolete=False, - no_fuzzy_matching=False, previous=False) - options, args = parser.parse_args(argv) - - if not options.input_file: - parser.error('you must specify the input file') - if not options.output_file and not options.output_dir: - parser.error('you must specify the output file or directory') - if options.output_file and not options.locale: - parser.error('you must specify the locale') - if options.no_fuzzy_matching and options.previous: - options.previous = False - - po_files = [] - if not options.output_file: - if options.locale: - po_files.append((options.locale, - os.path.join(options.output_dir, - options.locale, 'LC_MESSAGES', - options.domain + '.po'))) - else: - for locale in os.listdir(options.output_dir): - po_file = os.path.join(options.output_dir, locale, - 'LC_MESSAGES', - options.domain + '.po') - if os.path.exists(po_file): - po_files.append((locale, po_file)) - else: - po_files.append((options.locale, options.output_file)) - - domain = options.domain - if not domain: - domain = os.path.splitext(os.path.basename(options.input_file))[0] - - infile = open(options.input_file, 'U') - try: - template = read_po(infile) - finally: - infile.close() - - if not po_files: - parser.error('no message catalogs found') - - if options.width and options.no_wrap: - parser.error("'--no-wrap' and '--width' are mutually exclusive.") - elif not options.width and not options.no_wrap: - options.width = 76 - for locale, filename in po_files: - self.log.info('updating catalog %r based on %r', filename, - options.input_file) - infile = open(filename, 'U') - try: - catalog = read_po(infile, locale=locale, domain=domain) - finally: - infile.close() - - catalog.update(template, options.no_fuzzy_matching) - - tmpname = os.path.join(os.path.dirname(filename), - tempfile.gettempprefix() + - os.path.basename(filename)) - tmpfile = open(tmpname, 'w') - try: - try: - write_po(tmpfile, catalog, - ignore_obsolete=options.ignore_obsolete, - 
include_previous=options.previous, - width=options.width) - finally: - tmpfile.close() - except: - os.remove(tmpname) - raise - - try: - os.rename(tmpname, filename) - except OSError: - # We're probably on Windows, which doesn't support atomic - # renames, at least not through Python - # If the error is in fact due to a permissions problem, that - # same error is going to be raised from one of the following - # operations - os.remove(filename) - shutil.copy(tmpname, filename) - os.remove(tmpname) - - -def main(): - return CommandLineInterface().run(sys.argv) - - -def parse_mapping(fileobj, filename=None): - """Parse an extraction method mapping from a file-like object. - - >>> buf = BytesIO(b''' - ... [extractors] - ... custom = mypackage.module:myfunc - ... - ... # Python source files - ... [python: **.py] - ... - ... # Genshi templates - ... [genshi: **/templates/**.html] - ... include_attrs = - ... [genshi: **/templates/**.txt] - ... template_class = genshi.template:TextTemplate - ... encoding = latin-1 - ... - ... # Some custom extractor - ... [custom: **/custom/*.*] - ... ''') - - >>> method_map, options_map = parse_mapping(buf) - >>> len(method_map) - 4 - - >>> method_map[0] - ('**.py', 'python') - >>> options_map['**.py'] - {} - >>> method_map[1] - ('**/templates/**.html', 'genshi') - >>> options_map['**/templates/**.html']['include_attrs'] - '' - >>> method_map[2] - ('**/templates/**.txt', 'genshi') - >>> options_map['**/templates/**.txt']['template_class'] - 'genshi.template:TextTemplate' - >>> options_map['**/templates/**.txt']['encoding'] - 'latin-1' - - >>> method_map[3] - ('**/custom/*.*', 'mypackage.module:myfunc') - >>> options_map['**/custom/*.*'] - {} - - :param fileobj: a readable file-like object containing the configuration - text to parse - :see: `extract_from_directory` - """ - extractors = {} - method_map = [] - options_map = {} - - parser = RawConfigParser() - parser._sections = odict(parser._sections) # We need ordered sections - parser.readfp(fileobj, filename) - for section in parser.sections(): - if section == 'extractors': - extractors = dict(parser.items(section)) - else: - method, pattern = [part.strip() for part in section.split(':', 1)] - method_map.append((pattern, method)) - options_map[pattern] = dict(parser.items(section)) - - if extractors: - for idx, (pattern, method) in enumerate(method_map): - if method in extractors: - method = extractors[method] - method_map[idx] = (pattern, method) - - return (method_map, options_map) - - -def parse_keywords(strings=[]): - """Parse keywords specifications from the given list of strings. - - >>> kw = parse_keywords(['_', 'dgettext:2', 'dngettext:2,3', 'pgettext:1c,2']).items() - >>> kw.sort() - >>> for keyword, indices in kw: - ... 
print (keyword, indices) - ('_', None) - ('dgettext', (2,)) - ('dngettext', (2, 3)) - ('pgettext', ((1, 'c'), 2)) - """ - keywords = {} - for string in strings: - if ':' in string: - funcname, indices = string.split(':') - else: - funcname, indices = string, None - if funcname not in keywords: - if indices: - inds = [] - for x in indices.split(','): - if x[-1] == 'c': - inds.append((int(x[:-1]), 'c')) - else: - inds.append(int(x)) - indices = tuple(inds) - keywords[funcname] = indices - return keywords - - -if __name__ == '__main__': - main() diff --git a/vendor/babel/messages/jslexer.py b/vendor/babel/messages/jslexer.py deleted file mode 100644 index 22c6e1f9..00000000 --- a/vendor/babel/messages/jslexer.py +++ /dev/null @@ -1,166 +0,0 @@ -# -*- coding: utf-8 -*- -""" - babel.messages.jslexer - ~~~~~~~~~~~~~~~~~~~~~~ - - A simple JavaScript 1.5 lexer which is used for the JavaScript - extractor. - - :copyright: (c) 2013 by the Babel Team. - :license: BSD, see LICENSE for more details. -""" - -from operator import itemgetter -import re -from babel._compat import unichr - -operators = [ - '+', '-', '*', '%', '!=', '==', '<', '>', '<=', '>=', '=', - '+=', '-=', '*=', '%=', '<<', '>>', '>>>', '<<=', '>>=', - '>>>=', '&', '&=', '|', '|=', '&&', '||', '^', '^=', '(', ')', - '[', ']', '{', '}', '!', '--', '++', '~', ',', ';', '.', ':' -] -operators.sort(key=lambda a: -len(a)) - -escapes = {'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t'} - -rules = [ - (None, re.compile(r'\s+(?u)')), - (None, re.compile(r'' % ( - render_traceback(self, full=full), - self.render_as_text().decode('utf-8', 'replace') - ) - - @property - def is_template_syntax_error(self): - """`True` if this is a template syntax error.""" - return isinstance(self.exc_value, TemplateSyntaxError) - - @property - def exc_info(self): - """Exception info tuple with a proxy around the frame objects.""" - return self.exc_type, self.exc_value, self.frames[0] - - @property - def standard_exc_info(self): - """Standard python exc_info for re-raising""" - tb = self.frames[0] - # the frame will be an actual traceback (or transparent proxy) if - # we are on pypy or a python implementation with support for tproxy - if type(tb) is not TracebackType: - tb = tb.tb - return self.exc_type, self.exc_value, tb - - -def make_traceback(exc_info, source_hint=None): - """Creates a processed traceback object from the exc_info.""" - exc_type, exc_value, tb = exc_info - if isinstance(exc_value, TemplateSyntaxError): - exc_info = translate_syntax_error(exc_value, source_hint) - initial_skip = 0 - else: - initial_skip = 1 - return translate_exception(exc_info, initial_skip) - - -def translate_syntax_error(error, source=None): - """Rewrites a syntax error to please traceback systems.""" - error.source = source - error.translated = True - exc_info = (error.__class__, error, None) - filename = error.filename - if filename is None: - filename = '' - return fake_exc_info(exc_info, filename, error.lineno) - - -def translate_exception(exc_info, initial_skip=0): - """If passed an exc_info it will automatically rewrite the exceptions - all the way down to the correct line numbers and frames. - """ - tb = exc_info[2] - frames = [] - - # skip some internal frames if wanted - for x in range(initial_skip): - if tb is not None: - tb = tb.tb_next - initial_tb = tb - - while tb is not None: - # skip frames decorated with @internalcode. These are internal - # calls we can't avoid and that are useless in template debugging - # output. 
- if tb.tb_frame.f_code in internal_code: - tb = tb.tb_next - continue - - # save a reference to the next frame if we override the current - # one with a faked one. - next = tb.tb_next - - # fake template exceptions - template = tb.tb_frame.f_globals.get('__jinja_template__') - if template is not None: - lineno = template.get_corresponding_lineno(tb.tb_lineno) - tb = fake_exc_info(exc_info[:2] + (tb,), template.filename, - lineno)[2] - - frames.append(make_frame_proxy(tb)) - tb = next - - # if we don't have any exceptions in the frames left, we have to - # reraise it unchanged. - # XXX: can we backup here? when could this happen? - if not frames: - reraise(exc_info[0], exc_info[1], exc_info[2]) - - return ProcessedTraceback(exc_info[0], exc_info[1], frames) - - -def fake_exc_info(exc_info, filename, lineno): - """Helper for `translate_exception`.""" - exc_type, exc_value, tb = exc_info - - # figure the real context out - if tb is not None: - real_locals = tb.tb_frame.f_locals.copy() - ctx = real_locals.get('context') - if ctx: - locals = ctx.get_all() - else: - locals = {} - for name, value in iteritems(real_locals): - if name.startswith('l_') and value is not missing: - locals[name[2:]] = value - - # if there is a local called __jinja_exception__, we get - # rid of it to not break the debug functionality. - locals.pop('__jinja_exception__', None) - else: - locals = {} - - # assamble fake globals we need - globals = { - '__name__': filename, - '__file__': filename, - '__jinja_exception__': exc_info[:2], - - # we don't want to keep the reference to the template around - # to not cause circular dependencies, but we mark it as Jinja - # frame for the ProcessedTraceback - '__jinja_template__': None - } - - # and fake the exception - code = compile('\n' * (lineno - 1) + raise_helper, filename, 'exec') - - # if it's possible, change the name of the code. This won't work - # on some python environments such as google appengine - try: - if tb is None: - location = 'template' - else: - function = tb.tb_frame.f_code.co_name - if function == 'root': - location = 'top-level template code' - elif function.startswith('block_'): - location = 'block "%s"' % function[6:] - else: - location = 'template' - - if PY2: - code = CodeType(0, code.co_nlocals, code.co_stacksize, - code.co_flags, code.co_code, code.co_consts, - code.co_names, code.co_varnames, filename, - location, code.co_firstlineno, - code.co_lnotab, (), ()) - else: - code = CodeType(0, code.co_kwonlyargcount, - code.co_nlocals, code.co_stacksize, - code.co_flags, code.co_code, code.co_consts, - code.co_names, code.co_varnames, filename, - location, code.co_firstlineno, - code.co_lnotab, (), ()) - except Exception as e: - pass - - # execute the code and catch the new traceback - try: - exec(code, globals, locals) - except: - exc_info = sys.exc_info() - new_tb = exc_info[2].tb_next - - # return without this frame - return exc_info[:2] + (new_tb,) - - -def _init_ugly_crap(): - """This function implements a few ugly things so that we can patch the - traceback objects. The function returned allows resetting `tb_next` on - any python traceback object. 
Do not attempt to use this on non cpython - interpreters - """ - import ctypes - from types import TracebackType - - if PY2: - # figure out size of _Py_ssize_t for Python 2: - if hasattr(ctypes.pythonapi, 'Py_InitModule4_64'): - _Py_ssize_t = ctypes.c_int64 - else: - _Py_ssize_t = ctypes.c_int - else: - # platform ssize_t on Python 3 - _Py_ssize_t = ctypes.c_ssize_t - - # regular python - class _PyObject(ctypes.Structure): - pass - _PyObject._fields_ = [ - ('ob_refcnt', _Py_ssize_t), - ('ob_type', ctypes.POINTER(_PyObject)) - ] - - # python with trace - if hasattr(sys, 'getobjects'): - class _PyObject(ctypes.Structure): - pass - _PyObject._fields_ = [ - ('_ob_next', ctypes.POINTER(_PyObject)), - ('_ob_prev', ctypes.POINTER(_PyObject)), - ('ob_refcnt', _Py_ssize_t), - ('ob_type', ctypes.POINTER(_PyObject)) - ] - - class _Traceback(_PyObject): - pass - _Traceback._fields_ = [ - ('tb_next', ctypes.POINTER(_Traceback)), - ('tb_frame', ctypes.POINTER(_PyObject)), - ('tb_lasti', ctypes.c_int), - ('tb_lineno', ctypes.c_int) - ] - - def tb_set_next(tb, next): - """Set the tb_next attribute of a traceback object.""" - if not (isinstance(tb, TracebackType) and - (next is None or isinstance(next, TracebackType))): - raise TypeError('tb_set_next arguments must be traceback objects') - obj = _Traceback.from_address(id(tb)) - if tb.tb_next is not None: - old = _Traceback.from_address(id(tb.tb_next)) - old.ob_refcnt -= 1 - if next is None: - obj.tb_next = ctypes.POINTER(_Traceback)() - else: - next = _Traceback.from_address(id(next)) - next.ob_refcnt += 1 - obj.tb_next = ctypes.pointer(next) - - return tb_set_next - - -# try to get a tb_set_next implementation if we don't have transparent -# proxies. -tb_set_next = None -if tproxy is None: - try: - tb_set_next = _init_ugly_crap() - except: - pass - del _init_ugly_crap diff --git a/vendor/jinja2/defaults.py b/vendor/jinja2/defaults.py deleted file mode 100644 index 3717a722..00000000 --- a/vendor/jinja2/defaults.py +++ /dev/null @@ -1,43 +0,0 @@ -# -*- coding: utf-8 -*- -""" - jinja2.defaults - ~~~~~~~~~~~~~~~ - - Jinja default filters and tags. - - :copyright: (c) 2010 by the Jinja Team. - :license: BSD, see LICENSE for more details. -""" -from jinja2._compat import range_type -from jinja2.utils import generate_lorem_ipsum, Cycler, Joiner - - -# defaults for the parser / lexer -BLOCK_START_STRING = '{%' -BLOCK_END_STRING = '%}' -VARIABLE_START_STRING = '{{' -VARIABLE_END_STRING = '}}' -COMMENT_START_STRING = '{#' -COMMENT_END_STRING = '#}' -LINE_STATEMENT_PREFIX = None -LINE_COMMENT_PREFIX = None -TRIM_BLOCKS = False -LSTRIP_BLOCKS = False -NEWLINE_SEQUENCE = '\n' -KEEP_TRAILING_NEWLINE = False - - -# default filters, tests and namespace -from jinja2.filters import FILTERS as DEFAULT_FILTERS -from jinja2.tests import TESTS as DEFAULT_TESTS -DEFAULT_NAMESPACE = { - 'range': range_type, - 'dict': dict, - 'lipsum': generate_lorem_ipsum, - 'cycler': Cycler, - 'joiner': Joiner -} - - -# export all constants -__all__ = tuple(x for x in locals().keys() if x.isupper()) diff --git a/vendor/jinja2/environment.py b/vendor/jinja2/environment.py deleted file mode 100644 index 8b2572bb..00000000 --- a/vendor/jinja2/environment.py +++ /dev/null @@ -1,1213 +0,0 @@ -# -*- coding: utf-8 -*- -""" - jinja2.environment - ~~~~~~~~~~~~~~~~~~ - - Provides a class that holds runtime and parsing time options. - - :copyright: (c) 2010 by the Jinja Team. - :license: BSD, see LICENSE for more details. 
-""" -import os -import sys -from jinja2 import nodes -from jinja2.defaults import BLOCK_START_STRING, \ - BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \ - COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \ - LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \ - DEFAULT_FILTERS, DEFAULT_TESTS, DEFAULT_NAMESPACE, \ - KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS -from jinja2.lexer import get_lexer, TokenStream -from jinja2.parser import Parser -from jinja2.nodes import EvalContext -from jinja2.optimizer import optimize -from jinja2.compiler import generate, CodeGenerator -from jinja2.runtime import Undefined, new_context, Context -from jinja2.exceptions import TemplateSyntaxError, TemplateNotFound, \ - TemplatesNotFound, TemplateRuntimeError -from jinja2.utils import import_string, LRUCache, Markup, missing, \ - concat, consume, internalcode -from jinja2._compat import imap, ifilter, string_types, iteritems, \ - text_type, reraise, implements_iterator, implements_to_string, \ - get_next, encode_filename, PY2, PYPY -from functools import reduce - - -# for direct template usage we have up to ten living environments -_spontaneous_environments = LRUCache(10) - -# the function to create jinja traceback objects. This is dynamically -# imported on the first exception in the exception handler. -_make_traceback = None - - -def get_spontaneous_environment(*args): - """Return a new spontaneous environment. A spontaneous environment is an - unnamed and unaccessible (in theory) environment that is used for - templates generated from a string and not from the file system. - """ - try: - env = _spontaneous_environments.get(args) - except TypeError: - return Environment(*args) - if env is not None: - return env - _spontaneous_environments[args] = env = Environment(*args) - env.shared = True - return env - - -def create_cache(size): - """Return the cache class for the given size.""" - if size == 0: - return None - if size < 0: - return {} - return LRUCache(size) - - -def copy_cache(cache): - """Create an empty copy of the given cache.""" - if cache is None: - return None - elif type(cache) is dict: - return {} - return LRUCache(cache.capacity) - - -def load_extensions(environment, extensions): - """Load the extensions from the list and bind it to the environment. - Returns a dict of instantiated environments. - """ - result = {} - for extension in extensions: - if isinstance(extension, string_types): - extension = import_string(extension) - result[extension.identifier] = extension(environment) - return result - - -def _environment_sanity_check(environment): - """Perform a sanity check on the environment.""" - assert issubclass(environment.undefined, Undefined), 'undefined must ' \ - 'be a subclass of undefined because filters depend on it.' - assert environment.block_start_string != \ - environment.variable_start_string != \ - environment.comment_start_string, 'block, variable and comment ' \ - 'start strings must be different' - assert environment.newline_sequence in ('\r', '\r\n', '\n'), \ - 'newline_sequence set to unknown line ending string.' - return environment - - -class Environment(object): - r"""The core component of Jinja is the `Environment`. It contains - important shared variables like configuration, filters, tests, - globals and others. Instances of this class may be modified if - they are not shared and if no template was loaded so far. - Modifications on environments after the first template was loaded - will lead to surprising effects and undefined behavior. 
- - Here are the possible initialization parameters: - - `block_start_string` - The string marking the beginning of a block. Defaults to ``'{%'``. - - `block_end_string` - The string marking the end of a block. Defaults to ``'%}'``. - - `variable_start_string` - The string marking the beginning of a print statement. - Defaults to ``'{{'``. - - `variable_end_string` - The string marking the end of a print statement. Defaults to - ``'}}'``. - - `comment_start_string` - The string marking the beginning of a comment. Defaults to ``'{#'``. - - `comment_end_string` - The string marking the end of a comment. Defaults to ``'#}'``. - - `line_statement_prefix` - If given and a string, this will be used as prefix for line based - statements. See also :ref:`line-statements`. - - `line_comment_prefix` - If given and a string, this will be used as prefix for line based - comments. See also :ref:`line-statements`. - - .. versionadded:: 2.2 - - `trim_blocks` - If this is set to ``True`` the first newline after a block is - removed (block, not variable tag!). Defaults to `False`. - - `lstrip_blocks` - If this is set to ``True`` leading spaces and tabs are stripped - from the start of a line to a block. Defaults to `False`. - - `newline_sequence` - The sequence that starts a newline. Must be one of ``'\r'``, - ``'\n'`` or ``'\r\n'``. The default is ``'\n'`` which is a - useful default for Linux and OS X systems as well as web - applications. - - `keep_trailing_newline` - Preserve the trailing newline when rendering templates. - The default is ``False``, which causes a single newline, - if present, to be stripped from the end of the template. - - .. versionadded:: 2.7 - - `extensions` - List of Jinja extensions to use. This can either be import paths - as strings or extension classes. For more information have a - look at :ref:`the extensions documentation `. - - `optimized` - should the optimizer be enabled? Default is `True`. - - `undefined` - :class:`Undefined` or a subclass of it that is used to represent - undefined values in the template. - - `finalize` - A callable that can be used to process the result of a variable - expression before it is output. For example one can convert - `None` implicitly into an empty string here. - - `autoescape` - If set to true the XML/HTML autoescaping feature is enabled by - default. For more details about autoescaping see - :class:`~jinja2.utils.Markup`. As of Jinja 2.4 this can also - be a callable that is passed the template name and has to - return `True` or `False` depending on autoescape should be - enabled by default. - - .. versionchanged:: 2.4 - `autoescape` can now be a function - - `loader` - The template loader for this environment. - - `cache_size` - The size of the cache. Per default this is ``400`` which means - that if more than 400 templates are loaded the loader will clean - out the least recently used template. If the cache size is set to - ``0`` templates are recompiled all the time, if the cache size is - ``-1`` the cache will not be cleaned. - - .. versionchanged:: 2.8 - The cache size was increased to 400 from a low 50. - - `auto_reload` - Some loaders load templates from locations where the template - sources may change (ie: file system or database). If - `auto_reload` is set to `True` (default) every time a template is - requested the loader checks if the source changed and if yes, it - will reload the template. For higher performance it's possible to - disable that. 
- - `bytecode_cache` - If set to a bytecode cache object, this object will provide a - cache for the internal Jinja bytecode so that templates don't - have to be parsed if they were not changed. - - See :ref:`bytecode-cache` for more information. - """ - - #: if this environment is sandboxed. Modifying this variable won't make - #: the environment sandboxed though. For a real sandboxed environment - #: have a look at jinja2.sandbox. This flag alone controls the code - #: generation by the compiler. - sandboxed = False - - #: True if the environment is just an overlay - overlayed = False - - #: the environment this environment is linked to if it is an overlay - linked_to = None - - #: shared environments have this set to `True`. A shared environment - #: must not be modified - shared = False - - #: these are currently EXPERIMENTAL undocumented features. - exception_handler = None - exception_formatter = None - - #: the class that is used for code generation. See - #: :class:`~jinja2.compiler.CodeGenerator` for more information. - code_generator_class = CodeGenerator - - #: the context class thatis used for templates. See - #: :class:`~jinja2.runtime.Context` for more information. - context_class = Context - - def __init__(self, - block_start_string=BLOCK_START_STRING, - block_end_string=BLOCK_END_STRING, - variable_start_string=VARIABLE_START_STRING, - variable_end_string=VARIABLE_END_STRING, - comment_start_string=COMMENT_START_STRING, - comment_end_string=COMMENT_END_STRING, - line_statement_prefix=LINE_STATEMENT_PREFIX, - line_comment_prefix=LINE_COMMENT_PREFIX, - trim_blocks=TRIM_BLOCKS, - lstrip_blocks=LSTRIP_BLOCKS, - newline_sequence=NEWLINE_SEQUENCE, - keep_trailing_newline=KEEP_TRAILING_NEWLINE, - extensions=(), - optimized=True, - undefined=Undefined, - finalize=None, - autoescape=False, - loader=None, - cache_size=400, - auto_reload=True, - bytecode_cache=None): - # !!Important notice!! - # The constructor accepts quite a few arguments that should be - # passed by keyword rather than position. However it's important to - # not change the order of arguments because it's used at least - # internally in those cases: - # - spontaneous environments (i18n extension and Template) - # - unittests - # If parameter changes are required only add parameters at the end - # and don't change the arguments (or the defaults!) of the arguments - # existing already. 
- - # lexer / parser information - self.block_start_string = block_start_string - self.block_end_string = block_end_string - self.variable_start_string = variable_start_string - self.variable_end_string = variable_end_string - self.comment_start_string = comment_start_string - self.comment_end_string = comment_end_string - self.line_statement_prefix = line_statement_prefix - self.line_comment_prefix = line_comment_prefix - self.trim_blocks = trim_blocks - self.lstrip_blocks = lstrip_blocks - self.newline_sequence = newline_sequence - self.keep_trailing_newline = keep_trailing_newline - - # runtime information - self.undefined = undefined - self.optimized = optimized - self.finalize = finalize - self.autoescape = autoescape - - # defaults - self.filters = DEFAULT_FILTERS.copy() - self.tests = DEFAULT_TESTS.copy() - self.globals = DEFAULT_NAMESPACE.copy() - - # set the loader provided - self.loader = loader - self.cache = create_cache(cache_size) - self.bytecode_cache = bytecode_cache - self.auto_reload = auto_reload - - # load extensions - self.extensions = load_extensions(self, extensions) - - _environment_sanity_check(self) - - def add_extension(self, extension): - """Adds an extension after the environment was created. - - .. versionadded:: 2.5 - """ - self.extensions.update(load_extensions(self, [extension])) - - def extend(self, **attributes): - """Add the items to the instance of the environment if they do not exist - yet. This is used by :ref:`extensions ` to register - callbacks and configuration values without breaking inheritance. - """ - for key, value in iteritems(attributes): - if not hasattr(self, key): - setattr(self, key, value) - - def overlay(self, block_start_string=missing, block_end_string=missing, - variable_start_string=missing, variable_end_string=missing, - comment_start_string=missing, comment_end_string=missing, - line_statement_prefix=missing, line_comment_prefix=missing, - trim_blocks=missing, lstrip_blocks=missing, - extensions=missing, optimized=missing, - undefined=missing, finalize=missing, autoescape=missing, - loader=missing, cache_size=missing, auto_reload=missing, - bytecode_cache=missing): - """Create a new overlay environment that shares all the data with the - current environment except for cache and the overridden attributes. - Extensions cannot be removed for an overlayed environment. An overlayed - environment automatically gets all the extensions of the environment it - is linked to plus optional extra extensions. - - Creating overlays should happen after the initial environment was set - up completely. Not all attributes are truly linked, some are just - copied over so modifications on the original environment may not shine - through. 
- """ - args = dict(locals()) - del args['self'], args['cache_size'], args['extensions'] - - rv = object.__new__(self.__class__) - rv.__dict__.update(self.__dict__) - rv.overlayed = True - rv.linked_to = self - - for key, value in iteritems(args): - if value is not missing: - setattr(rv, key, value) - - if cache_size is not missing: - rv.cache = create_cache(cache_size) - else: - rv.cache = copy_cache(self.cache) - - rv.extensions = {} - for key, value in iteritems(self.extensions): - rv.extensions[key] = value.bind(rv) - if extensions is not missing: - rv.extensions.update(load_extensions(rv, extensions)) - - return _environment_sanity_check(rv) - - lexer = property(get_lexer, doc="The lexer for this environment.") - - def iter_extensions(self): - """Iterates over the extensions by priority.""" - return iter(sorted(self.extensions.values(), - key=lambda x: x.priority)) - - def getitem(self, obj, argument): - """Get an item or attribute of an object but prefer the item.""" - try: - return obj[argument] - except (TypeError, LookupError): - if isinstance(argument, string_types): - try: - attr = str(argument) - except Exception: - pass - else: - try: - return getattr(obj, attr) - except AttributeError: - pass - return self.undefined(obj=obj, name=argument) - - def getattr(self, obj, attribute): - """Get an item or attribute of an object but prefer the attribute. - Unlike :meth:`getitem` the attribute *must* be a bytestring. - """ - try: - return getattr(obj, attribute) - except AttributeError: - pass - try: - return obj[attribute] - except (TypeError, LookupError, AttributeError): - return self.undefined(obj=obj, name=attribute) - - def call_filter(self, name, value, args=None, kwargs=None, - context=None, eval_ctx=None): - """Invokes a filter on a value the same way the compiler does it. - - .. versionadded:: 2.7 - """ - func = self.filters.get(name) - if func is None: - raise TemplateRuntimeError('no filter named %r' % name) - args = [value] + list(args or ()) - if getattr(func, 'contextfilter', False): - if context is None: - raise TemplateRuntimeError('Attempted to invoke context ' - 'filter without context') - args.insert(0, context) - elif getattr(func, 'evalcontextfilter', False): - if eval_ctx is None: - if context is not None: - eval_ctx = context.eval_ctx - else: - eval_ctx = EvalContext(self) - args.insert(0, eval_ctx) - elif getattr(func, 'environmentfilter', False): - args.insert(0, self) - return func(*args, **(kwargs or {})) - - def call_test(self, name, value, args=None, kwargs=None): - """Invokes a test on a value the same way the compiler does it. - - .. versionadded:: 2.7 - """ - func = self.tests.get(name) - if func is None: - raise TemplateRuntimeError('no test named %r' % name) - return func(value, *(args or ()), **(kwargs or {})) - - @internalcode - def parse(self, source, name=None, filename=None): - """Parse the sourcecode and return the abstract syntax tree. This - tree of nodes is used by the compiler to convert the template into - executable source- or bytecode. This is useful for debugging or to - extract information from templates. - - If you are :ref:`developing Jinja2 extensions ` - this gives you a good overview of the node tree generated. 
- """ - try: - return self._parse(source, name, filename) - except TemplateSyntaxError: - exc_info = sys.exc_info() - self.handle_exception(exc_info, source_hint=source) - - def _parse(self, source, name, filename): - """Internal parsing function used by `parse` and `compile`.""" - return Parser(self, source, name, encode_filename(filename)).parse() - - def lex(self, source, name=None, filename=None): - """Lex the given sourcecode and return a generator that yields - tokens as tuples in the form ``(lineno, token_type, value)``. - This can be useful for :ref:`extension development ` - and debugging templates. - - This does not perform preprocessing. If you want the preprocessing - of the extensions to be applied you have to filter source through - the :meth:`preprocess` method. - """ - source = text_type(source) - try: - return self.lexer.tokeniter(source, name, filename) - except TemplateSyntaxError: - exc_info = sys.exc_info() - self.handle_exception(exc_info, source_hint=source) - - def preprocess(self, source, name=None, filename=None): - """Preprocesses the source with all extensions. This is automatically - called for all parsing and compiling methods but *not* for :meth:`lex` - because there you usually only want the actual source tokenized. - """ - return reduce(lambda s, e: e.preprocess(s, name, filename), - self.iter_extensions(), text_type(source)) - - def _tokenize(self, source, name, filename=None, state=None): - """Called by the parser to do the preprocessing and filtering - for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`. - """ - source = self.preprocess(source, name, filename) - stream = self.lexer.tokenize(source, name, filename, state) - for ext in self.iter_extensions(): - stream = ext.filter_stream(stream) - if not isinstance(stream, TokenStream): - stream = TokenStream(stream, name, filename) - return stream - - def _generate(self, source, name, filename, defer_init=False): - """Internal hook that can be overridden to hook a different generate - method in. - - .. versionadded:: 2.5 - """ - return generate(source, self, name, filename, defer_init=defer_init) - - def _compile(self, source, filename): - """Internal hook that can be overridden to hook a different compile - method in. - - .. versionadded:: 2.5 - """ - return compile(source, filename, 'exec') - - @internalcode - def compile(self, source, name=None, filename=None, raw=False, - defer_init=False): - """Compile a node or template source code. The `name` parameter is - the load name of the template after it was joined using - :meth:`join_path` if necessary, not the filename on the file system. - the `filename` parameter is the estimated filename of the template on - the file system. If the template came from a database or memory this - can be omitted. - - The return value of this method is a python code object. If the `raw` - parameter is `True` the return value will be a string with python - code equivalent to the bytecode returned otherwise. This method is - mainly used internally. - - `defer_init` is use internally to aid the module code generator. This - causes the generated code to be able to import without the global - environment variable to be set. - - .. versionadded:: 2.4 - `defer_init` parameter added. 
- """ - source_hint = None - try: - if isinstance(source, string_types): - source_hint = source - source = self._parse(source, name, filename) - if self.optimized: - source = optimize(source, self) - source = self._generate(source, name, filename, - defer_init=defer_init) - if raw: - return source - if filename is None: - filename = '