Commit 5c3e4f25 authored by Stefano Zacchiroli

New upstream version 2.1.3+hg20181225

parent 0414a6e1
syntax: glob
*.pyc
*.swp
*~
TAGS
build
dist
*.a
*.so
.noseids
examples/generated/*
beancount/parser/grammar.output
*.picklecache
*.DS_Store
.idea
beancount.egg-info
.cache
.pytest_cache/*
12b7f102ffbb8ae6d18fd6edf40f5cf2d354a4d6 v1
1f60a2293b600c4bc22926fb1cc0d4300145eba2 old-posting
da62959c106a3f06ffc4a1fcdb5e6ae97e77152d master
6d69f09be1d4ac9365432b2a8a4bc130b521ccda github/master
a85679977ede1586585ae0b92cd121e1b8835481 github/master
8af008648277cd9bf2a6699d6db550ad12d88453 2.0.0
e291c91b37e1d21c29645d93473a7917bb726699 2.1.0
c36d4fec6823ddbfbabbfde429f01175cdb4d661 2.1.1
2b20d301782528147e0aba744da9a014323ef630 2.1.2
37d28f318d3711e6d6646c76e97546114ec22808 2.1.3
fd27856ff2de97c7ae94a4cbc0bbac4174c70f3e 2.1.3
language: python
python:
- "3.6"
addons:
apt:
packages:
- gnupg
- zip
install:
- pip install -r requirements.txt
script:
- make build
- NOSE=nosetests make test
Martin Blais (original author).
Contributors (in alphabetical order):
- Adrián Medraño Calvo (support for UTF-8)
- Alex Johnstone (bug fix)
- Alok Parlikar (bug fix)
- Christoph Sarnowski (bug fix)
- Daniel Clemente (early adoption and tickets)
- Dominik Aumayr (web-related bug fixes, static documentation, author of Fava)
- Ethan Glasser-Camp (improvements to grammar)
- Felix Eckhofer (bug fix)
- Hugo Ideler (bug fix, price source)
- Jakob Schnitzer (bug fixes in plugins and SQL, co-author/maintainer of Fava)
- Jason Chu (bug fixes, plugins, lots of comments)
- Jeff Brantley (Windows installation improvements)
- Johannes Harms (bug fixes, forecast plugin improvement)
- Mark Hansen (docs)
- Markus Teufelberger (bug fix / testing)
- Martin Michlmayr (numerous tickets, CI setup, bug fixes and improvements)
- Michael Droogleever (importer-related improvements)
- Mikhail Gusarov (parser / improvement for short account names)
- Patrick Ruckstuhl (bug fix importers)
- Robert Sesek (bug fix)
- Zhuoyun Wei (importers improvements)
- dave_stephens (web-related improvement)
- rkhwaja (Windows installation improvement)
- Сергей Трофимов (bug fix, parser)
And all the other users on the mailing-list asking important questions, making
useful suggestions and engaging in productive discussions. Thank you!
include requirements.txt
include COPYING
#!/usr/bin/env make
# Just my big old fat ledger file.
INPUT = $(HOME)/q/office/accounting/blais.beancount
DOWNLOADS = $(HOME)/u/Downloads
GREP="grep --include="*.py" -srnE"
TOOLS=./tools
PYTHON?=python3
all: build
# Clean everything up.
clean:
rm -f core
rm -rf build
rm -f $(CROOT)/grammar.h $(CROOT)/grammar.c
rm -f $(CROOT)/lexer.h $(CROOT)/lexer.c
rm -f $(CROOT)/*.so
find . -name __pycache__ -exec rm -r "{}" \; -prune
# Targets to generate and compile the C parser.
CROOT = beancount/parser
# See
# https://www.owlfolio.org/possibly-useful/flex-input-scanner-rules-are-too-complicated/
#LEX = flex -Ca
LEX = flex
YACC = bison --report=itemset --verbose
FILTERYACC = sed -e 's@/\*[ \t]yacc\.c:.*\*/@@'
TMP=/tmp
$(CROOT)/grammar.c $(CROOT)/grammar.h: $(CROOT)/grammar.y
$(YACC) -o $(CROOT)/grammar.c $<
(cat $(CROOT)/grammar.c | $(FILTERYACC) > $(TMP)/grammar.c ; mv $(TMP)/grammar.c $(CROOT)/grammar.c )
(cat $(CROOT)/grammar.h | $(FILTERYACC) > $(TMP)/grammar.h ; mv $(TMP)/grammar.h $(CROOT)/grammar.h )
UNICODE_CATEGORY_RANGES_GENERATOR=$(TOOLS)/generate_unicode_category_regexps.py
UNICODE_CATEGORY_DIR = $(CROOT)/lexer
UNICODE_CATEGORIES = Lu Ll Lt Lo Nd Nl No
UNICODE_CATEGORY_SOURCES = $(patsubst %, $(UNICODE_CATEGORY_DIR)/%.l, $(UNICODE_CATEGORIES))
$(UNICODE_CATEGORY_SOURCES): $(UNICODE_CATEGORY_DIR)/%.l :
$(PYTHON) $(UNICODE_CATEGORY_RANGES_GENERATOR) \
--format=lex --name=UTF-8-$* --categories=$* >$@
# Note that flex parses the files in the given order.
#LEXER_SOURCES = $(UNICODE_CATEGORY_SOURCES) $(CROOT)/lexer.l
#$(CROOT)/lexer.c $(CROOT)/lexer.h: $(LEXER_SOURCES) $(CROOT)/grammar.h
# $(LEX) --outfile=$(CROOT)/lexer.c --header-file=$(CROOT)/lexer.h $(LEXER_SOURCES)
# patch -p1 < $(CROOT)/lexer.patch
FLEX_VERSION=$(shell $(LEX) -V)
$(CROOT)/lexer.c $(CROOT)/lexer.h: $(CROOT)/lexer.l $(CROOT)/grammar.h
$(LEX) --outfile=$(CROOT)/lexer.c --header-file=$(CROOT)/lexer.h $<
SOURCES = \
$(CROOT)/lexer.c \
$(CROOT)/lexer.h \
$(CROOT)/grammar.c \
$(CROOT)/grammar.h
.PHONY: build
build: $(SOURCES)
$(PYTHON) setup.py build_ext -i
build35: $(SOURCES)
python3.5 setup.py build_ext -i
# Dump the lexer parsed output. This can be used to check across languages.
dump_lexer:
bean-dump-lexer $(INPUT)
# Check for memory leaks.
grind:
valgrind --leak-check=full $(PYTHON) bean-sandbox $(INPUT)
# Regenerate the website.
html docs:
projects docs beancount
# Compute and plot inter-module dependencies.
# We want to ensure a really strict set of relationships between the modules,
# and this is the high-level picture.
build/beancount.deps:
sfood -i bin beancount > $@
CLUSTERS_REGEXPS = \
beancount/core/.*_test\.py core/tests \
beancount/core core \
beancount/ops/.*_test\.py ops/tests \
beancount/ops ops \
beancount/parser/printer_test\.py printer/tests \
beancount/parser/printer.py printer \
beancount/parser/options_test\.py options/tests \
beancount/parser/options.py options \
beancount/parser/.*_test\.py parser/tests \
beancount/parser parser \
beancount/plugins/.*_test\.py plugins/tests \
beancount/plugins plugins \
beancount/reports/.*_test\.py reports/tests \
beancount/reports reports \
beancount/scripts/bake.*_test\.py scripts/bake/tests \
beancount/scripts/bake.* scripts/bake \
beancount/scripts/.*_test\.py scripts/tests \
beancount/scripts scripts \
beancount/utils/.*_test\.py utils/tests \
beancount/utils utils \
beancount/web/.*_test\.py web/tests \
beancount/web web \
beancount/query/.*_test\.py query/tests \
beancount/query query \
beancount/load.*_test\.py load/tests \
beancount/load.*\.py load \
beancount load
GRAPHER = dot
build/beancount.pdf: build/beancount.deps
cat $< | sfood-cluster-regexp $(CLUSTERS_REGEXPS) | grep -v /tests | sfood-graph | $(GRAPHER) -Tps | ps2pdf - $@
evince $@
build/beancount_tests.pdf: build/beancount.deps
cat $< | sfood-cluster-regexp $(CLUSTERS_REGEXPS) | sfood-graph | $(GRAPHER) -Tps | ps2pdf - $@
evince $@
# Compute and plot the dependencies within the core.
# We are considering a separation of the basic data structure and the basic operations.
# This provides the detail of the relationships between these sets of files.
build/beancount-core.pdf: build/beancount-core.deps
sfood -ii beancount/core/*.py | sfood-graph | $(GRAPHER) -Tps | ps2pdf - $@
showdeps-core: build/beancount-core.pdf
evince $<
# Run in the debugger.
debug:
gdb --args $(PYTHON) /home/blais/p/beancount/bin/bean-sandbox $(INPUT)
# Push to github.
github:
hg bookmark -r default master
hg push git+ssh://git@github.com/blais/beancount
# Bake a release.
release:
$(PYTHON) setup.py register sdist upload
vtest vtests verbose-test verbose-tests:
$(PYTHON) -m pytest -v -s beancount examples
qtest qtests quiet-test quiet-tests test tests:
$(PYTHON) -m pytest beancount
test-last test-last-failed test-failed:
$(PYTHON) -m pytest --last-failed beancount
test-naked:
PATH=/bin:/usr/bin PYTHONPATH= $(PYTHON) -m pytest -x beancount
# Run the parser and measure its performance.
.PHONY: check
check:
bean-check $(INPUT)
# Run the demo program.
demo:
bin/bean-web --debug examples/demo.beancount
# Generate the tutorial files from the example file.
EXAMPLE=examples/example.beancount
example $(EXAMPLE):
./bin/bean-example --seed=0 -o $(EXAMPLE)
TUTORIAL=examples/tutorial
tutorial: $(EXAMPLE)
$(PYTHON) beancount/scripts/tutorial.py $(EXAMPLE) $(TUTORIAL)
# Run the web server.
.PHONY: web
web:
bean-web --debug $(INPUT)
.PHONY: web-incognito
web-incognito:
bean-web --incognito --debug $(INPUT)
# Run the importer.
.PHONY: import
import:
bean-import $(INPUT) $(DOWNLOADS)
# My development sandbox script. This is messy and it's okay.
.PHONY: sandbox
sandbox:
bean-sandbox $(INPUT)
missing-tests:
$(TOOLS)/find_missing_tests.py beancount
fixmes:
egrep -srn '\b(FIXME|TODO\()' beancount || true
filter-terms:
egrep --exclude-dir='.hg' --exclude-dir='__pycache__' -srn 'GOOGL?\b' $(PWD) | grep -v GOOGLE_APIS || true
multi-imports:
(egrep -srn '^(from.*)?import.*,' beancount | grep -v 'from typing') || true
# Check for unused imports.
sfood-checker:
sfood-checker bin beancount
# Check dependency constraints.
constraints dep-constraints: build/beancount.deps
$(TOOLS)/dependency_constraints.py $<
# Run the linter on all source code.
# To list all messages, call: "pylint --list-msgs"
LINT_SRCS = \
beancount \
examples/ingest/office/importers \
bin/* \
tools/*.py
# Note: Keeping to 3.5 because 3.6 pylint raises an exception (as of 2017-01-15).
#PYLINT = pylint
PYLINT = python3 $(shell which pylint)
pylint lint:
$(PYLINT) --rcfile=$(PWD)/etc/pylintrc $(LINT_SRCS)
LINT_TESTS=useless-suppression,empty-docstring
pylint-only:
$(PYLINT) --rcfile=$(PWD)/etc/pylintrc --disable=all --enable=$(LINT_TESTS) $(LINT_SRCS)
pyflakes:
pyflakes $(LINT_SRCS)
# Check everything.
status check: pylint pyflakes filter-terms missing-tests dep-constraints multi-imports test
# Experimental docs conversion.
download-pdf:
./tools/download_docs.py pdf $(HOME)/p/beancount-downloads/pdf
download-odt:
./tools/download_docs.py odt $(HOME)/p/beancount-downloads/odt
sphinx sphinx_odt2rst:
./tools/sphinx_odt2rst.py $(HOME)/p/beancount-downloads/odt $(HOME)/p/beancount-docs
convert_test:
./tools/convert_doc.py --cache=/tmp/convert_test.cache '1WjARst_cSxNE-Lq6JnJ5CC41T3WndEsiMw4d46r2694' /tmp/trading.md
# This does not work well; import errors just won't go away, it's slow, and it
# seems you have to pregenerate all .pyi to do anything useful.
pytype:
find $(PWD)/beancount -name '*.py' | parallel -j16 pytype --pythonpath=$(PWD) -o {}i {}
pytype1:
pytype --pythonpath=$(PWD) beancount/utils/net_utils.py
Metadata-Version: 1.1
Name: beancount
Version: 2.1.3
Summary: Command-line Double-Entry Accounting
Home-page: http://furius.ca/beancount
Description: Command-line Double-Entry Accounting
Official-Homepage: http://furius.ca/beancount
Author: Martin Blais
Author-email: blais@furius.ca
Author-Email: blais@furius.ca
ChangeLog: http://furius.ca/beancount/CHANGES
Bugs-Reporting: mailto:blais@furius.ca
Download-Snapshots: http://bitbucket.org/blais/beancount
License: GNU GPLv2 only
Download-URL: http://bitbucket.org/blais/beancount
Description:
A double-entry accounting system that uses text files as input.
Beancount defines a simple data format or "language" that lets you define
financial transaction records in a text file, load them in memory and
generate and export a variety of reports, such as balance sheets or income
statements. It also provides a client with an SQL-like query language to
filter and aggregate financial data, and a web interface which renders
those reports to HTML. Finally, it provides the scaffolding required to
automate the conversion of external data into one's input file in
Beancount syntax.
Platform: UNKNOWN
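As a hedged illustration of the workflow the description above refers to (not part of this commit), the sketch below loads a small ledger string with beancount.loader and walks the parsed directives; the ledger text and account names are invented for the example.

from beancount import loader
from beancount.core import data

# Invented example input in Beancount syntax.
EXAMPLE_LEDGER = """
2018-01-01 open Assets:Checking
2018-01-01 open Expenses:Groceries

2018-12-24 * "Corner store" "Holiday groceries"
  Expenses:Groceries   45.10 USD
  Assets:Checking
"""

# load_string() parses and validates the input, returning the directives,
# any errors, and the options map derived from the file.
entries, errors, options_map = loader.load_string(EXAMPLE_LEDGER)
assert not errors
for entry in entries:
    if isinstance(entry, data.Transaction):
        print(entry.date, entry.narration)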
environment:
matrix:
# For Python versions available on Appveyor, see
# http://www.appveyor.com/docs/installed-software#python
# The list here is complete (excluding Python 2.6, which
# isn't covered by this document) at the time of writing.
- PYTHON: "C:\\Python36-x64"
install:
# We need wheel installed to build wheels
- "%PYTHON%\\python.exe -m pip install wheel"
build: off
# test_script:
# Put your test command here.
# If you don't need to build C extensions on 64-bit Python 3.3 or 3.4,
# you can remove "build.cmd" from the front of the command, as it's
# only needed to support those cases.
# Note that you must use the environment variable %PYTHON% to refer to
# the interpreter you're using - Appveyor does not do anything special
# to put the Python version you want to use on PATH.
# - "%PYTHON%\\python.exe setup.py test"
after_test:
# This step builds your wheels.
# Again, you only need build.cmd if you're building C extensions for
# 64-bit Python 3.3/3.4. And you need to use %PYTHON% to get the correct
# interpreter
- "%PYTHON%\\python.exe setup.py build_ext --compiler=msvc --define YY_NO_UNISTD_H"
- "%PYTHON%\\python.exe setup.py bdist_wheel"
artifacts:
# bdist_wheel puts your built wheel in the dist directory
- path: dist\*.whl
#on_success:
# You can use this step to upload your artifacts to a public website.
# See Appveyor's documentation for more details. Or you can simply
# access your wheels from the Appveyor "artifacts" tab for your build.
Metadata-Version: 1.1
Name: beancount
Version: 2.1.3
Summary: Command-line Double-Entry Accounting
Home-page: http://furius.ca/beancount
Author: Martin Blais
Author-email: blais@furius.ca
License: GNU GPLv2 only
Download-URL: http://bitbucket.org/blais/beancount
Description:
A double-entry accounting system that uses text files as input.
Beancount defines a simple data format or "language" that lets you define
financial transaction records in a text file, load them in memory and
generate and export a variety of reports, such as balance sheets or income
statements. It also provides a client with an SQL-like query language to
filter and aggregate financial data, and a web interface which renders
those reports to HTML. Finally, it provides the scaffolding required to
automate the conversion of external data into one's input file in
Beancount syntax.
Platform: UNKNOWN
pytest
python-dateutil
ply
bottle
lxml
python-magic
beautifulsoup4
requests
chardet
google-api-python-client
@@ -13,4 +13,23 @@ if (sys.version_info.major, sys.version_info.minor) < (3, 3):
raise ImportError("Python 3.3 or above is required")
__version__ = '2.1.3'
__version__ = '2.2.0-dev'
# Remove annoying warnings in third-party modules.
import warnings
warnings.filterwarnings(
'ignore', module='lxml', category=DeprecationWarning,
message='Using or importing the ABCs from')
warnings.filterwarnings(
'ignore', module='html5lib', category=DeprecationWarning,
message='Using or importing the ABCs from')
warnings.filterwarnings(
'ignore', module='bs4', category=DeprecationWarning,
message='Using or importing the ABCs from')
warnings.filterwarnings(
'ignore', module='bottle', category=DeprecationWarning,
message='Flags not at the start of the expression')
warnings.filterwarnings(
'ignore', module='bottle', category=DeprecationWarning,
message='invalid escape sequence')
@@ -280,7 +280,7 @@ Note = new_directive('Note', [
# Attributes:
# meta: See above.
# date: See above.
# type: A short string, typically a single lowercase word, that defines a
# "type": A short string, typically a single lowercase word, that defines a
# unique variable whose value changes over time. For example, 'location'.
# description: A free-form string, the value of the variable as of the date
# of the transaction.
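As a hedged aside (not part of the diff): the "type"/"description" pair documented above describes the Event directive defined just after this comment block in data.py; constructing one programmatically looks roughly like the following, with an invented date and values.

import datetime
from beancount.core import data

# Hypothetical event: a 'location' variable taking a new value on that date.
meta = data.new_metadata('<example>', 0)
event = data.Event(meta, datetime.date(2018, 12, 25), "location", "Paris, France")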
@@ -26,7 +26,7 @@ __copyright__ = "Copyright (C) 2013-2017 Martin Blais"
__license__ = "GNU GPLv2"
import collections
from collections import Iterable
from collections.abc import Iterable
import enum
import re
@@ -97,7 +97,6 @@ def find_duplicate_entries(new_entries_list, existing_entries):
Returns:
A list of lists of modified new entries (like new_entries_list),
potentially with modified metadata to indicate those which are duplicated.
"""
mod_entries_list = []
for key, new_entries in new_entries_list:
@@ -117,11 +116,10 @@ def find_duplicate_entries(new_entries_list, existing_entries):
return mod_entries_list
def print_extracted_entries(importer, entries, file):
"""Print the entries for the given importer.
def print_extracted_entries(entries, file):
"""Print a list of entries.
Args:
importer: An importer object that matched the file.
entries: A list of extracted entries.
file: A file object to write to.
"""
@@ -151,7 +149,8 @@ def extract(importer_config,
entries=None,
options_map=None,
mindate=None,
ascending=True):
ascending=True,
detect_duplicates_func=None):
"""Given an importer configuration, search for files that can be imported in the
list of files or directories, run the signature checks on them, and if it
succeeds, run the importer on the file.
@@ -169,6 +168,10 @@ def extract(importer_config,
mindate: Optional minimum date to output transactions for.
ascending: A boolean, true to print entries in ascending order, false if
descending is desired.
detect_duplicates_func: An optional function which accepts a list of
lists of imported entries and a list of entries already existing in
the user's ledger. See function find_duplicate_entries(), which is the
default implementation for this.
"""
allow_none_for_tags_and_links = (
options_map and options_map["allow_deprecated_none_for_tags_and_links"])
@@ -197,8 +200,9 @@ def extract(importer_config,
# list of existing ones, or against each other. A single call to this
# function is made on purpose, so that the function be able to merge
# entries.
new_entries_list = find_duplicate_entries(
new_entries_list, entries)
if detect_duplicates_func is None:
detect_duplicates_func = find_duplicate_entries
new_entries_list = detect_duplicates_func(new_entries_list, entries)
assert isinstance(new_entries_list, list)
assert all(isinstance(new_entries, tuple) for new_entries in new_entries_list)
assert all(isinstance(new_entries[0], str) for new_entries in new_entries_list)
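For illustration, a hedged sketch (not part of the diff) of a caller-supplied detect_duplicates_func honouring the contract documented above: it receives the list of (key, new_entries) pairs plus the entries already in the ledger and returns a list of the same shape. The matching rule here (same date and narration) is invented for the example; DUPLICATE_META is the metadata key the default find_duplicate_entries() uses.

from beancount.ingest import extract

def detect_by_date_and_narration(new_entries_list, existing_entries):
    """Mark as duplicate any imported entry whose (date, narration) pair
    already appears in the existing ledger entries."""
    seen = {(entry.date, getattr(entry, 'narration', None))
            for entry in (existing_entries or [])}
    marked_list = []
    for key, new_entries in new_entries_list:
        for entry in new_entries:
            if (entry.date, getattr(entry, 'narration', None)) in seen:
                entry.meta[extract.DUPLICATE_META] = True
        marked_list.append((key, new_entries))
    return marked_list

Such a function would then be passed through the new keyword threaded through run() and extract() in this diff, e.g. extract(..., detect_duplicates_func=detect_by_date_and_narration).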
@@ -211,7 +215,7 @@ def extract(importer_config,
output.write('\n')
if not ascending:
new_entries.reverse()
print_extracted_entries(importer, new_entries, output)
print_extracted_entries(new_entries, output)
DESCRIPTION = "Extract transactions from downloads"
@@ -231,7 +235,7 @@ def add_arguments(parser):
help='Write out the entries in descending order')
def run(args, _, importers_list, files_or_directories):
def run(args, _, importers_list, files_or_directories, detect_duplicates_func=None):
"""Run the subcommand."""
# Load the ledger, if one is specified.
@@ -241,8 +245,11 @@ def run(args, _, importers_list, files_or_directories):
entries, options_map = None, None
extract(importers_list, files_or_directories, sys.stdout,
entries=entries, options_map=options_map,
mindate=None, ascending=args.ascending)
entries=entries,
options_map=options_map,
mindate=None,
ascending=args.ascending,
detect_duplicates_func=detect_duplicates_func)
return 0
@@ -186,9 +186,8 @@ class TestPrintExtractedEntries(scripts_utils.TestScriptsBase, unittest.TestCase
entries[-2].meta[extract.DUPLICATE_META] = True
importer = TestPrintExtractedEntries.ExtractTestImporter()
oss = io.StringIO()
extract.print_extracted_entries(importer, entries, oss)
extract.print_extracted_entries(entries, oss)
self.assertEqual(textwrap.dedent("""\