Commit 9df63f3d authored by Daniele Tricoli

Update upstream source from tag 'upstream/20181108+dfsg'

Update to upstream version '20181108+dfsg'
with Debian dir 8a9cfc040809ee9dc5b4eed867064b605812503a
parents c83d3fb8 6fe31d08
*.class
*.pyc
*.pyo
.svn
_svn
.pythoscope
.ipynb_checkpoints
.settings
_update.bat
docs/_build
/Goulib.egg-info/
/build/
/dist/
/pdfminer.six.egg-info/
tests/*.xml
tests/*.txt
.idea/
.tox/
language: python
python:
- "2.7"
- "3.4"
- "3.5"
- "3.6"
install:
- pip install six
- pip install pycryptodome
- pip install chardet
- pip install sortedcontainers
script:
nosetests --nologcapture
# List of changes
## Version 20181108
- PR #141 to speedup layout analysis
- PR #173 to use argparse and replace the deprecated getopt
- PR #142 so that pdfminer.six compiles successfully with Cython
Copyright (c) 2004-2016 Yusuke Shinyama <yusuke at shinyama dot jp>
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
include Makefile
include LICENSE
include *.txt
include *.md
include *.py
graft cmaprsrc
graft docs
graft pdfminer
graft samples
graft tools
global-exclude *.pyc
## Makefile (for maintainance purpose)
## Makefile (for maintenance purpose)
##
PACKAGE=pdfminer
PYTHON=python2
PYTHON=python
GIT=git
RM=rm -f
CP=cp -f
@@ -19,8 +19,9 @@ clean:
-$(RM) -r build dist MANIFEST
-cd $(PACKAGE) && $(MAKE) clean
-cd tools && $(MAKE) clean
-cd samples && $(MAKE) clean
distclean: clean test_clean cmap_clean
distclean: clean cmap_clean
sdist: distclean MANIFEST.in
$(PYTHON) setup.py sdist
@@ -54,6 +55,5 @@ $(CMAPDST)/to-unicode-Adobe-Korea1.pickle.gz: $(CMAPDST)
$(CMAPDST) Adobe-Korea1 $(CMAPSRC)/cid2code_Adobe_Korea1.txt
test: cmap
nosetests
cd samples && $(MAKE) test
test_clean:
-cd samples && $(MAKE) clean
Metadata-Version: 1.1
Name: pdfminer
Version: 20140328
Summary: PDF parser and analyzer
Home-page: http://euske.github.io/pdfminer/index.html
Author: Yusuke Shinyama
Author-email: yusuke at cs dot nyu dot edu
License: MIT/X
Description: PDFMiner is a tool for extracting information from PDF documents.
Unlike other PDF-related tools, it focuses entirely on getting
and analyzing text data. PDFMiner allows one to obtain
the exact location of text in a page, as well as
other information such as fonts or lines.
It includes a PDF converter that can transform PDF files
into other text formats (such as HTML). It has an extensible
PDF parser that can be used for other purposes than text analysis.
Keywords: pdf parser,pdf converter,layout analysis,text mining
Platform: UNKNOWN
Classifier: Development Status :: 4 - Beta
Classifier: Environment :: Console
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: Science/Research
Classifier: License :: OSI Approved :: MIT License
Classifier: Topic :: Text Processing
PDFMiner.six
============
PDFMiner.six is a fork of PDFMiner using six for Python 2+3 compatibility
[![Build Status](https://travis-ci.org/pdfminer/pdfminer.six.svg?branch=master)](https://travis-ci.org/pdfminer/pdfminer.six) [![PyPI version](https://img.shields.io/pypi/v/pdfminer.six.svg)](https://pypi.python.org/pypi/pdfminer.six/)
PDFMiner is a tool for extracting information from PDF documents.
Unlike other PDF-related tools, it focuses entirely on getting
and analyzing text data. PDFMiner allows one to obtain
the exact location of text in a page, as well as
other information such as fonts or lines.
It includes a PDF converter that can transform PDF files
into other text formats (such as HTML). It has an extensible
PDF parser that can be used for other purposes than text analysis.
* Webpage: https://github.com/pdfminer/
* Download (PyPI): https://pypi.python.org/pypi/pdfminer.six/
Features
--------
* Written entirely in Python.
* Parse, analyze, and convert PDF documents.
* PDF-1.7 specification support. (well, almost)
* CJK languages and vertical writing scripts support.
* Various font types (Type1, TrueType, Type3, and CID) support.
* Basic encryption (RC4) support.
* Outline (TOC) extraction.
* Tagged contents extraction.
* Automatic layout analysis.
How to Install
--------------
* Install Python 2.7 or newer. (Python 3.x is supported in pdfminer.six)
* Install
`pip install pdfminer.six`
* Run the following test:
`pdf2txt.py samples/simple1.pdf`
Command Line Tools
------------------
PDFMiner comes with two handy tools:
pdf2txt.py and dumppdf.py.
**pdf2txt.py**
pdf2txt.py extracts text contents from a PDF file.
It extracts all the text that is rendered programmatically,
i.e. text represented as ASCII or Unicode strings.
It cannot recognize text drawn as images, which would require optical character recognition.
It also extracts the corresponding locations, font names, font sizes, and writing
direction (horizontal or vertical) for each text portion.
You need to provide a password for protected PDF documents whose access is restricted.
You cannot extract any text from a PDF document that does not have extraction permission.
(For details, refer to /docs/index.html.)
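
As a minimal sketch (not part of the upstream README), the same extraction can be driven from Python 3 using the classes the bundled tool itself builds on (`PDFResourceManager`, `TextConverter`, `PDFPageInterpreter`, `PDFPage.get_pages`); the sample path is the one used in the install test above:

```python
import io

from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager
from pdfminer.pdfpage import PDFPage

def pdf_to_text(path, password=''):
    """Roughly what pdf2txt.py does: run every page through a TextConverter."""
    output = io.StringIO()
    rsrcmgr = PDFResourceManager()
    device = TextConverter(rsrcmgr, output, laparams=LAParams())
    interpreter = PDFPageInterpreter(rsrcmgr, device)
    with open(path, 'rb') as fp:
        for page in PDFPage.get_pages(fp, password=password):
            interpreter.process_page(page)
    device.close()
    return output.getvalue()

print(pdf_to_text('samples/simple1.pdf'))
```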
**dumppdf.py**
dumppdf.py dumps the internal contents of a PDF file in pseudo-XML format.
This program is primarily for debugging purposes,
but it's also possible to extract some meaningful contents (e.g. images).
(For details, refer to /docs/index.html.)
TODO
----
* PEP-8 and PEP-257 conformance.
* Better documentation.
* Performance improvements.
Terms and Conditions
--------------------
(This is the so-called MIT/X License.)
Copyright (c) 2004-2014 Yusuke Shinyama <yusuke at cs dot nyu dot edu>
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -5,7 +5,7 @@ to decode text data written in CJK (Chinese, Japanese, Korean) language.
CMap resources are now available freely from Adobe web site:
http://opensource.adobe.com/wiki/display/cmap/CMap+Resources
The follwing files were extracted from the downloadable tarballs:
The following files were extracted from the downloadable tarballs:
cid2code_Adobe_CNS1.txt:
http://download.macromedia.com/pub/opensource/cmap/cmapresources_cns1-6.tar.z
@@ -9,7 +9,7 @@
<div align=right class=lastmod>
<!-- hhmts start -->
Last Modified: Fri Mar 28 09:17:06 UTC 2014
Last Modified: Wed Jun 25 10:27:52 UTC 2014
<!-- hhmts end -->
</div>
@@ -58,7 +58,7 @@ PDF parser that can be used for other purposes than text analysis.
<p>
<h3>Features</h3>
<ul>
<li> Written entirely in Python. (for version 2.4 or newer)
<li> Written entirely in Python. (for version 2.6 or newer)
<li> Parse, analyze, and convert PDF documents.
<li> PDF-1.7 specification support. (well, almost)
<li> CJK languages and vertical writing scripts support.
@@ -82,14 +82,14 @@ http://pdf2html.tabesugi.net:8080/
<h3><a name="download">Download</a></h3>
<p>
<strong>Source distribution:</strong><br>
<a href="http://pypi.python.org/pypi/pdfminer/">
http://pypi.python.org/pypi/pdfminer/
<a href="http://pypi.python.org/pypi/pdfminer_six/">
http://pypi.python.org/pypi/pdfminer_six/
</a>
<P>
<strong>github:</strong><br>
<a href="https://github.com/euske/pdfminer/">
https://github.com/euske/pdfminer/
<a href="https://github.com/goulu/pdfminer/">
https://github.com/goulu/pdfminer/
</a>
<h3><a name="wheretoask">Where to Ask</a></h3>
@@ -100,11 +100,9 @@ https://github.com/euske/pdfminer/
http://groups.google.com/group/pdfminer-users/
</a>
<h2><a name="install">How to Install</a></h2>
<ol>
<li> Install <a href="http://www.python.org/download/">Python</a> 2.4 or newer.
(<font color=red><strong>Python 3 is not supported.</strong></font>)
<li> Install <a href="http://www.python.org/download/">Python</a> 2.6 or newer.
<li> Download the <a href="#source">PDFMiner source</a>.
<li> Unpack it.
<li> Run <code>setup.py</code> to install:<br>
@@ -240,7 +238,7 @@ represented as a space, but indicated by the positioning of each word.
<p>
Each value is specified not as an actual length, but as a proportion of
the length to the size of each character in question. The default values
are M = 1.0, L = 0.3, and W = 0.2, respectively.
are M = 2.0, L = 0.5, and W = 0.1, respectively.
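<p>
As an illustration, these ratios correspond to the <code>char_margin</code>, <code>line_margin</code>
and <code>word_margin</code> attributes of the layout analyzer's <code>LAParams</code> object,
so the equivalent programmatic setting is:
<blockquote><pre>
from pdfminer.layout import LAParams
# M, L and W above map to char_margin, line_margin and word_margin.
laparams = LAParams(char_margin=2.0, line_margin=0.5, word_margin=0.1)
</pre></blockquote>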
<table style="border:2px gray solid; margin: 10px; padding: 10px;"><tr>
<td style="border-right:1px red solid" align=right>&rarr;</td>
<td style="border-left:1px red solid" colspan="4" align=left>&larr; <em><font color="red">M</font></em></td>
@@ -372,82 +370,10 @@ no stream header is displayed for the ease of saving it to a file.
<dd> Increases the debug level.
</dl>
<h2><a name="changes">Changes</a></h2>
<h2><a name="changes">Changes:</a></h2>
<ul>
<li> 2014/03/28: Further bugfixes.
<li> 2014/03/24: Bugfixes and improvements for faulty PDFs.<br>
API changes:
<ul>
<li> <code>PDFDocument.initialize()</code> method is removed and no longer needed.
A password is given as an argument of a PDFDocument constructor.
</ul>
<li> 2013/11/13: Bugfixes and minor improvements.<br>
As of November 2013, there were a few changes made to the PDFMiner API
prior to October 2013. This is the result of code restructuring. Here
is a list of the changes:
<ul>
<li> <code>PDFDocument</code> class is moved to <code>pdfdocument.py</code>.
<li> <code>PDFDocument</code> class now takes a <code>PDFParser</code> object as an argument.
<li> <code>PDFDocument.set_parser()</code> and <code>PDFParser.set_document()</code> is removed.
<li> <code>PDFPage</code> class is moved to <code>pdfpage.py</code>.
<li> <code>process_pdf</code> function is implemented as <code>PDFPage.get_pages</code>.
</ul>
<li> 2013/10/22: Sudden resurge of interests. API changes.
Incorporated a lot of patches and robust handling of broken PDFs.
<li> 2011/05/15: Speed improvements for layout analysis.
<li> 2011/05/15: API changes. <code>LTText.get_text()</code> is added.
<li> 2011/04/20: API changes. LTPolygon class was renamed as LTCurve.
<li> 2011/04/20: LTLine now represents horizontal/vertical lines only. Thanks to Koji Nakagawa.
<li> 2011/03/07: Documentation improvements by Jakub Wilk. Memory usage patch by Jonathan Hunt.
<li> 2011/02/27: Bugfixes and layout analysis improvements. Thanks to fujimoto.report.
<li> 2010/12/26: A couple of bugfixes and minor improvements. Thanks to Kevin Brubeck Unhammer and Daniel Gerber.
<li> 2010/10/17: A couple of bugfixes and minor improvements. Thanks to standardabweichung and Alastair Irving.
<li> 2010/09/07: A minor bugfix. Thanks to Alexander Garden.
<li> 2010/08/29: A couple of bugfixes. Thanks to Sahan Malagi, pk, and Humberto Pereira.
<li> 2010/07/06: Minor bugfixes. Thanks to Federico Brega.
<li> 2010/06/13: Bugfixes and improvements on CMap data compression. Thanks to Jakub Wilk.
<li> 2010/04/24: Bugfixes and improvements on TOC extraction. Thanks to Jose Maria.
<li> 2010/03/26: Bugfixes. Thanks to Brian Berry and Lubos Pintes.
<li> 2010/03/22: Improved layout analysis. Added regression tests.
<li> 2010/03/12: A couple of bugfixes. Thanks to Sean Manefield.
<li> 2010/02/27: Changed the way of internal layout handling. (LTTextItem -&gt; LTChar)
<li> 2010/02/15: Several bugfixes. Thanks to Sean.
<li> 2010/02/13: Bugfix and enhancement. Thanks to Andr&eacute; Auzi.
<li> 2010/02/07: Several bugfixes. Thanks to Hiroshi Manabe.
<li> 2010/01/31: JPEG image extraction supported. Page rotation bug fixed.
<li> 2010/01/04: Python 2.6 warning removal. More doctest conversion.
<li> 2010/01/01: CMap bug fix. Thanks to Winfried Plappert.
<li> 2009/12/24: RunLengthDecode filter added. Thanks to Troy Bollinger.
<li> 2009/12/20: Experimental polygon shape extraction added. Thanks to Yusuf Dewaswala for reporting.
<li> 2009/12/19: CMap resources are now the part of the package. Thanks to Adobe for open-sourcing them.
<li> 2009/11/29: Password encryption bug fixed. Thanks to Yannick Gingras.
<li> 2009/10/31: SGML output format is changed and renamed as XML.
<li> 2009/10/24: Charspace bug fixed. Adjusted for 4-space indentation.
<li> 2009/10/04: Another matrix operation bug fixed. Thanks to Vitaly Sedelnik.
<li> 2009/09/12: Fixed rectangle handling. Able to extract image boundaries.
<li> 2009/08/30: Fixed page rotation handling.
<li> 2009/08/26: Fixed zlib decoding bug. Thanks to Shon Urbas.
<li> 2009/08/24: Fixed a bug in character placing. Thanks to Pawan Jain.
<li> 2009/07/21: Improvement in layout analysis.
<li> 2009/07/11: Improvement in layout analysis. Thanks to Lubos Pintes.
<li> 2009/05/17: Bugfixes, massive code restructuring, and simple graphic element support added. setup.py is supported.
<li> 2009/03/30: Text output mode added.
<li> 2009/03/25: Encoding problems fixed. Word splitting option added.
<li> 2009/02/28: Robust handling of corrupted PDFs. Thanks to Troy Bollinger.
<li> 2009/02/01: Various bugfixes. Thanks to Hiroshi Manabe.
<li> 2009/01/17: Handling a trailer correctly that contains both /XrefStm and /Prev entries.
<li> 2009/01/10: Handling Type3 font metrics correctly.
<li> 2008/12/28: Better handling of word spacing. Thanks to Christian Nentwich.
<li> 2008/09/06: A sample pdf2html webapp added.
<li> 2008/08/30: ASCII85 encoding filter support.
<li> 2008/07/27: Tagged contents extraction support.
<li> 2008/07/10: Outline (TOC) extraction support.
<li> 2008/06/29: HTML output added. Reorganized the directory structure.
<li> 2008/04/29: Bugfix for Win32. Thanks to Chris Clark.
<li> 2008/04/27: Basic encryption and LZW decoding support added.
<li> 2008/01/07: Several bugfixes. Thanks to Nick Fabry for his vast contribution.
<li> 2007/12/31: Initial release.
<li> 2004/12/24: Start writing the code out of boredom...
<li> 2014/09/15: pushed on PyPi</li>
<li> 2014/09/10: pdfminer_six forked from pdfminer since Yusuke didn't want to merge and pdfminer3k is outdated</li>
</ul>
<h2><a name="todo">TODO</a></h2>
#!/usr/bin/env python
__version__ = '20140328'
# -*- coding: utf-8 -*-
"""
Fork of PDFMiner using six for Python 2+3 compatibility
PDFMiner is a tool for extracting information from PDF documents.
Unlike other PDF-related tools, it focuses entirely on getting and analyzing
text data. PDFMiner allows one to obtain the exact location of text in a page,
as well as other information such as fonts or lines.
It includes a PDF converter that can transform PDF files into other text
formats (such as HTML). It has an extensible PDF parser that can be used for
other purposes than text analysis.
"""
__version__ = '20181108'
if __name__ == '__main__':
print __version__
print(__version__)
#!/usr/bin/env python
""" Python implementation of Arcfour encryption algorithm.
""" Python implementation of Arcfour encryption algorithm.
See https://en.wikipedia.org/wiki/RC4
This code is in the public domain.
"""
import six # Python 2+3 compatibility
## Arcfour
##
class Arcfour(object):
"""
>>> Arcfour('Key').process('Plaintext').encode('hex')
'bbf316e8d940af0ad3'
>>> Arcfour('Wiki').process('pedia').encode('hex')
'1021bf0420'
>>> Arcfour('Secret').process('Attack at dawn').encode('hex')
'45a01f645fc35b383552544b9bf5'
"""
def __init__(self, key):
s = range(256)
s = [i for i in range(256)]  # a list is needed: Py3 range does not support item assignment
j = 0
klen = len(key)
for i in xrange(256):
j = (j + s[i] + ord(key[i % klen])) % 256
for i in range(256):
j = (j + s[i] + six.indexbytes(key,i % klen)) % 256
(s[i], s[j]) = (s[j], s[i])
self.s = s
(self.i, self.j) = (0, 0)
@@ -34,17 +25,16 @@ class Arcfour(object):
def process(self, data):
(i, j) = (self.i, self.j)
s = self.s
r = ''
for c in data:
r = b''
for c in six.iterbytes(data):
i = (i+1) % 256
j = (j+s[i]) % 256
(s[i], s[j]) = (s[j], s[i])
k = s[(s[i]+s[j]) % 256]
r += chr(ord(c) ^ k)
r += six.int2byte(c ^ k)
(self.i, self.j) = (i, j)
return r
encrypt = decrypt = process
# test
if __name__ == '__main__':
import doctest
doctest.testmod()
new = Arcfour
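# Usage sketch (illustrative, not part of the upstream module): after the port,
# key and data are bytes and the result is bytes on Python 2 and 3 alike.
# The expected value is the classic RC4 test vector from the docstring above.
from binascii import hexlify
assert hexlify(Arcfour(b'Key').process(b'Plaintext')) == b'bbf316e8d940af0ad3'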
#!/usr/bin/env python
""" Python implementation of ASCII85/ASCIIHex decoder (Adobe version).
@@ -9,6 +9,8 @@ This code is in the public domain.
import re
import struct
import six #Python 2+3 compatibility
# ascii85decode(data)
def ascii85decode(data):
@@ -21,27 +23,21 @@ def ascii85decode(data):
Adobe's ASCII85 implementation is slightly different from
the original in handling the last characters.
The sample string is taken from:
http://en.wikipedia.org/w/index.php?title=Ascii85
>>> ascii85decode('9jqo^BlbD-BleB1DJ+*+F(f,q')
'Man is distinguished'
>>> ascii85decode('E,9)oF*2M7/c~>')
'pleasure.'
"""
n = b = 0
out = ''
for c in data:
if '!' <= c and c <= 'u':
out = b''
for i in six.iterbytes(data):
c=six.int2byte(i)
if b'!' <= c and c <= b'u':
n += 1
b = b*85+(ord(c)-33)
if n == 5:
out += struct.pack('>L', b)
n = b = 0
elif c == 'z':
assert n == 0
out += '\0\0\0\0'
elif c == '~':
elif c == b'z':
assert n == 0, str(n)
out += b'\0\0\0\0'
elif c == b'~':
if n:
for _ in range(5-n):
b = b*85+84
@@ -50,8 +46,8 @@ def ascii85decode(data):
return out
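# Usage sketch (illustrative, not part of the upstream module): the decoder now
# takes bytes and returns bytes, so the former doctest vectors become:
assert ascii85decode(b'9jqo^BlbD-BleB1DJ+*+F(f,q') == b'Man is distinguished'
assert ascii85decode(b'E,9)oF*2M7/c~>') == b'pleasure.'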
# asciihexdecode(data)
hex_re = re.compile(r'([a-f\d]{2})', re.IGNORECASE)
trail_re = re.compile(r'^(?:[a-f\d]{2}|\s)*([a-f\d])[\s>]*$', re.IGNORECASE)
hex_re = re.compile(b'([a-f\d]{2})', re.IGNORECASE)
trail_re = re.compile(b'^(?:[a-f\d]{2}|\s)*([a-f\d])[\s>]*$', re.IGNORECASE)
def asciihexdecode(data):
@@ -63,22 +59,16 @@ def asciihexdecode(data):
EOD. Any other characters will cause an error. If the filter encounters
the EOD marker after reading an odd number of hexadecimal digits, it
will behave as if a 0 followed the last digit.
>>> asciihexdecode('61 62 2e6364 65')
'ab.cde'
>>> asciihexdecode('61 62 2e6364 657>')
'ab.cdep'
>>> asciihexdecode('7>')
'p'
"""
decode = (lambda hx: chr(int(hx, 16)))
out = map(decode, hex_re.findall(data))
m = trail_re.search(data)
if m:
out.append(decode("%c0" % m.group(1)))
return ''.join(out)
def decode(x):
i=int(x,16)
return six.int2byte(i)
out=b''
for x in hex_re.findall(data):
out+=decode(x)
if __name__ == '__main__':
import doctest
doctest.testmod()
m = trail_re.search(data)
if m:
out+=decode(m.group(1)+b'0')
return out
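# Usage sketch (illustrative, not part of the upstream module): the former
# doctest vectors, expressed with bytes for the ported decoder:
assert asciihexdecode(b'61 62 2e6364 65') == b'ab.cde'
assert asciihexdecode(b'61 62 2e6364 657>') == b'ab.cdep'
assert asciihexdecode(b'7>') == b'p'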
#!/usr/bin/env python
# CCITT Fax decoder
#
# Bugs: uncompressed mode untested.
@@ -13,6 +13,17 @@
import sys
import array
import six #Python 2+3 compatibility
if six.PY3:
def get_bytes(data):
for byte in data:
yield byte
else:
def get_bytes(data):
for char in data:
yield ord(char)
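# Note (illustrative, not part of the upstream module): either branch yields
# plain integers, e.g. list(get_bytes(b'\x80\x01')) == [128, 1] on both
# Python 2 and Python 3, so the bit parsers below never touch str/bytes again.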
## BitParser
##
@@ -26,7 +37,7 @@ class BitParser(object):
def add(klass, root, v, bits):
p = root
b = None
for i in xrange(len(bits)):
for i in range(len(bits)):
if 0 < i:
if p[b] is None:
p[b] = [None, None]
@@ -39,10 +50,9 @@ class BitParser(object):
return
def feedbytes(self, data):
for c in data:
b = ord(c)
for byte in get_bytes(data):
for m in (128, 64, 32, 16, 8, 4, 2, 1):
self._parse_bit(b & m)
self._parse_bit(byte & m)
return
def _parse_bit(self, x):
@@ -327,11 +337,10 @@ class CCITTG4Parser(BitParser):
return
def feedbytes(self, data):
for c in data:
b = ord(c)
for byte in get_bytes(data):
try:
for m in (128, 64, 32, 16, 8, 4, 2, 1):
self._parse_bit(b & m)
self._parse_bit(byte & m)
except self.ByteSkip:
self._accept = self._parse_mode
self._state = self.MODE
@@ -425,7 +434,7 @@ class CCITTG4Parser(BitParser):
return
def output_line(self, y, bits):
print y, ''.join(str(b) for b in bits)
print (y, ''.join(str(b) for b in bits))
return
def _reset_line(self):
@@ -462,10 +471,10 @@ class CCITTG4Parser(BitParser):
x0 = max(0, self._curpos)
x1 = max(0, min(self.width, x1))
if x1 < x0:
for x in xrange(x1, x0):
for x in range(x1, x0):
self._curline[x] = self._color
elif x0 < x1:
for x in xrange(x0, x1):
for x in range(x0, x1):
self._curline[x] = self._color
self._curpos = x1
self._color = 1-self._color
@@ -495,7 +504,7 @@ class CCITTG4Parser(BitParser):
self._refline[x1] == self._color):
break
x1 += 1
for x in xrange(self._curpos, x1):
for x in range(self._curpos, x1):
self._curline[x] = self._color
self._curpos = x1
return
@@ -505,12 +514,12 @@ class CCITTG4Parser(BitParser):
if self._curpos < 0:
self._curpos = 0
x = self._curpos