Skip to content
Commits on Source (28)
language: cpp
sudo: false
addons:
apt:
sources:
- boost-latest
- ubuntu-toolchain-r-test
packages:
- g++-4.8
- libexpat1-dev
- libpq-dev
- libbz2-dev
- libproj-dev
- lua5.2
- liblua5.2-dev
- libluajit-5.1-dev
- libboost1.55-dev
- libboost-system1.55-dev
- libboost-filesystem1.55-dev
language: generic
sudo: required
git:
depth: 1
services:
- postgresql
addons_shortcuts:
addons_clang38_pg92: &clang38_pg92
postgresql: '9.2'
apt:
sources: ['ubuntu-toolchain-r-test', 'llvm-toolchain-trusty-3.8']
packages: ['clang-3.8', 'postgresql-9.2-postgis-2.3',
'python3-psycopg2', 'libexpat1-dev', 'libpq-dev', 'libbz2-dev', 'libproj-dev',
'lua5.2', 'liblua5.2-dev', 'libluajit-5.1-dev',
'libboost1.55-dev', 'libboost-system1.55-dev', 'libboost-filesystem1.55-dev']
addons_clang7_pg96: &clang7_pg96
postgresql: '9.6'
apt:
update: true
sources:
- sourceline: 'deb http://apt.llvm.org/trusty/ llvm-toolchain-trusty-7 main'
key_url: https://apt.llvm.org/llvm-snapshot.gpg.key
- ubuntu-toolchain-r-test
packages: ['clang-7','postgresql-9.6-postgis-2.3',
'python3-psycopg2', 'libexpat1-dev', 'libpq-dev', 'libbz2-dev', 'libproj-dev',
'lua5.2', 'liblua5.2-dev', 'libluajit-5.1-dev',
'libboost1.55-dev', 'libboost-system1.55-dev', 'libboost-filesystem1.55-dev']
addons_gcc48_pg96: &gcc48_pg96
postgresql: '9.6'
apt:
sources: ["ubuntu-toolchain-r-test"]
packages: ['g++-4.8','postgresql-9.6-postgis-2.3',
'python3-psycopg2', 'libexpat1-dev', 'libpq-dev', 'libbz2-dev', 'libproj-dev',
'lua5.2', 'liblua5.2-dev', 'libluajit-5.1-dev',
'libboost1.55-dev', 'libboost-system1.55-dev', 'libboost-filesystem1.55-dev']
addons_gcc8_pg96: &gcc8_pg96
postgresql: '9.6'
apt:
sources: ["ubuntu-toolchain-r-test"]
packages: ['g++-8','postgresql-9.6-postgis-2.3',
'python3-psycopg2', 'libexpat1-dev', 'libpq-dev', 'libbz2-dev', 'libproj-dev',
'lua5.2', 'liblua5.2-dev', 'libluajit-5.1-dev',
'libboost1.55-dev', 'libboost-system1.55-dev', 'libboost-filesystem1.55-dev']
# env: T="...." // please set an unique test id (T="..")
matrix:
include:
# ---- Linux + CLANG ---------------------------
- os: linux
compiler: clang
env: CXXFLAGS="-pedantic -Werror" LUAJIT_OPTION="OFF"
- os: linux
compiler: gcc
env: RUNTEST="-L NoDB" CXXFLAGS="-pedantic -Werror -fsanitize=address" LUAJIT_OPTION="OFF"
dist: trusty
compiler: "clang-3.8"
env: T="clang38_pg92_dbtest" LUAJIT_OPTION="OFF"
CXXFLAGS="-pedantic -Wextra -Werror"
CC=clang-3.8 CXX=clang++-3.8
addons: *clang38_pg92
- os: linux
compiler: clang
env: CXXFLAGS="-pedantic -Werror" LUAJIT_OPTION="ON"
- os: linux
compiler: gcc
env: RUNTEST="-L NoDB" CXXFLAGS="-pedantic -Werror -fsanitize=address" LUAJIT_OPTION="ON"
dist: trusty
compiler: "clang-7"
env: T="clang7_pg96_dbtest_luajit" LUAJIT_OPTION="ON"
CXXFLAGS="-pedantic -Wextra -Werror"
CC=clang-7 CXX=clang++-7
addons: *clang7_pg96
# ---- OSX + CLANG ---------------------------
- os: osx
compiler: clang
env: RUNTEST="-L NoDB" CXXFLAGS="-pedantic -Werror -fsanitize=address" LUAJIT_OPTION="OFF"
env: T="osx_clang_NoDB" LUAJIT_OPTION="OFF" TEST_NODB=1
CXXFLAGS="-pedantic -Wextra -Werror"
before_install:
- brew install lua
before_script:
- xml2-config --version
- proj | head -n1
- lua -v
# ---- Linux + GCC ---------------------------
- os: linux
dist: trusty
compiler: "gcc-4.8"
env: T="gcc48_pg96_dbtest" LUAJIT_OPTION="OFF"
CXXFLAGS="-pedantic -Wextra -Werror"
CC=gcc-4.8 CXX=g++-4.8
addons: *gcc48_pg96
- os: linux
dist: trusty
compiler: gcc-8
env: T="gcc8_pg96_dbtest_luajit" LUAJIT_OPTION="ON"
CXXFLAGS="-pedantic -Wextra -Werror"
CC=gcc-8 CXX=g++-8
addons: *gcc8_pg96
before_install:
- if [[ $TRAVIS_OS_NAME == 'osx' ]]; then
brew install lua;
fi
# update versions
install:
- if [[ $CC == 'gcc' ]]; then
export CC=gcc-4.8;
fi
- if [[ $CXX == 'g++' ]]; then
export CXX=g++-4.8;
fi
- dpkg -l | grep -E 'lua|proj|xml|bz2|postgis|zlib|boost|expat' # checking available versions
before_script:
- psql -U postgres -c "SELECT version()"
- psql -U postgres -c "CREATE EXTENSION postgis"
- psql -U postgres -c "CREATE EXTENSION hstore"
- psql -U postgres -c "SELECT PostGIS_Full_Version()"
- $CXX --version
- xml2-config --version
- proj | head -n1
......@@ -55,8 +115,15 @@ script:
- mkdir build && cd build
- cmake .. -DBUILD_TESTS=ON -DCMAKE_BUILD_TYPE=Debug -DWITH_LUAJIT=$LUAJIT_OPTION
- make -j2
- echo "Running tests that do not require PostgreSQL server"
- if [[ $RUNTEST ]]; then ctest -VV $RUNTEST; fi
- echo "Running tests ... "
- if [[ $TEST_NODB ]]; then
ctest -VV -L NoDB;
else
PG_VERSION=`psql -U postgres -t -c "SELECT version()" | head -n 1 | cut -d ' ' -f 3 | cut -d . -f 1-2`;
pg_virtualenv -v $PG_VERSION ctest -VV;
fi
after_failure:
- # rerun make, but verbosely
make VERBOSE=1
# end of .travis
set(PACKAGE osm2pgsql)
set(PACKAGE_NAME osm2pgsql)
set(PACKAGE_VERSION 0.96.0-RC1)
set(PACKAGE_VERSION 0.96.0)
cmake_minimum_required(VERSION 2.8.7)
......@@ -168,17 +168,18 @@ if (NOT HAVE_UNISTD_H AND NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/unistd.h)
endif()
set(osm2pgsql_lib_SOURCES
db-copy.cpp
expire-tiles.cpp
geometry-processor.cpp
id-tracker.cpp
middle-pgsql.cpp
middle-ram.cpp
middle.cpp
node-persistent-cache.cpp
node-ram-cache.cpp
options.cpp
osmdata.cpp
osmium-builder.cpp
gazetteer-style.cpp
output-gazetteer.cpp
output-multi.cpp
output-null.cpp
......@@ -197,6 +198,7 @@ set(osm2pgsql_lib_SOURCES
tagtransform-c.cpp
util.cpp
wildcmp.cpp
db-copy.hpp
expire-tiles.hpp
geometry-processor.hpp
id-tracker.hpp
......@@ -209,6 +211,7 @@ set(osm2pgsql_lib_SOURCES
osmdata.hpp
osmium-builder.hpp
osmtypes.hpp
gazetteer-style.hpp
output-gazetteer.hpp
output-multi.hpp
output-null.hpp
......
......@@ -73,12 +73,24 @@ executing ``ctest``.
Regression tests require python and psycopg to be installed. On Ubuntu run:
```sh
sudo apt-get install python-psycopg2
sudo apt-get install python3-psycopg2
```
Most of these tests depend on being able to set up a database and run osm2pgsql
against it. You need to ensure that PostgreSQL is running and that your user is
a superuser of that system. To do that, run:
against it. This is most easily done using ``pg_virtualenv``. Just run
```sh
pg_virtualenv ctest
```
``pg_virtualenv`` creates a separate postgres server instance. The test databases
are created in this instance and the complete server is destroyed after the
tests are finished. ctest also calls appropriate fixtures that create the
separate tablespace required for some tests.
When running without ``pg_virtualenv``, you need to ensure that PostgreSQL is
running and that your user is a superuser of that system. You also need to
create an appropriate test tablespace manually. To do that, run:
```sh
sudo -u postgres createuser -s $USER
......@@ -94,9 +106,6 @@ to be a bug, please check to see if it is a known issue at
https://github.com/openstreetmap/osm2pgsql/issues and, if it's not
already known, report it there.
If running the tests in a virtual machine, allocate sufficient disk space for a
20GB flat nodes file.
### Performance Testing
If performance testing with a full planet import is required, indicate what
......
......@@ -21,8 +21,8 @@ Nominatim, or general analysis.
Most Linux distributions include osm2pgsql. It is also available on macOS with [Homebrew](http://brew.sh/).
Unofficial builds for Windows are available from [AppVeyor](https://ci.appveyor.com/project/openstreetmap/osm2pgsql/history) but you need to find the right build artifacts.
For the latest release 0.94.1, you may download a
[32bit version](https://ci.appveyor.com/api/projects/openstreetmap/osm2pgsql/artifacts/osm2pgsql_Release_x86.zip?tag=0.94.1&job=Environment%3A%20arch%3Dx86) or [64bit version](https://ci.appveyor.com/api/projects/openstreetmap/osm2pgsql/artifacts/osm2pgsql_Release_x64.zip?tag=0.94.1&job=Environment%3A%20arch%3Dx64).
For the latest release 0.96.0, you may download a
[32bit version](https://ci.appveyor.com/api/projects/openstreetmap/osm2pgsql/artifacts/osm2pgsql_Release_x86.zip?tag=0.96.0&job=Environment%3A%20arch%3Dx86) or [64bit version](https://ci.appveyor.com/api/projects/openstreetmap/osm2pgsql/artifacts/osm2pgsql_Release_x64.zip?tag=0.96.0&job=Environment%3A%20arch%3Dx64).
## Building ##
......@@ -172,6 +172,36 @@ null backend for testing. For flexibility a new [multi](docs/multi.md)
backend is also available which allows the configuration of custom
PostgreSQL tables instead of those provided in the pgsql backend.
## LuaJIT support ##
To speed up Lua tag transformations, [LuaJIT](http://luajit.org/) can be optionally
enabled on supported platforms. Performance measurements have shown about 25%
runtime reduction for a planet import, with about 40% reduction on parsing time.
On a Debian or Ubuntu system, this can be done with:
```sh
sudo apt install libluajit-5.1-dev
```
Configuration parameter `WITH_LUAJIT=ON` needs to be added to enable LuaJIT.
Otherwise make and installation steps are identical to the description above.
```sh
cmake -D WITH_LUAJIT=ON ..
```
Use `osm2pgsql --version` to verify that the build includes LuaJIT support:
```sh
./osm2pgsql --version
osm2pgsql version 0.96.0 (64 bit id space)
Compiled using the following library versions:
Libosmium 2.15.0
Lua 5.1.4 (LuaJIT 2.1.0-beta3)
```
## Contributing ##
We welcome contributions to osm2pgsql. If you would like to report an issue,
......
......@@ -10,7 +10,7 @@
find_path(LUAJIT_INCLUDE_DIR luajit.h
HINTS
ENV LUA_DIR
PATH_SUFFIXES include/luajit-2.0 include
PATH_SUFFIXES include/luajit-2.0 include/luajit-2.1 include
PATHS
~/Library/Frameworks
/Library/Frameworks
......
......@@ -71,6 +71,9 @@ find_path(OSMIUM_INCLUDE_DIR osmium/version.hpp
# Check libosmium version number
if(Osmium_FIND_VERSION)
if(NOT EXISTS "${OSMIUM_INCLUDE_DIR}/osmium/version.hpp")
message(FATAL_ERROR "Missing ${OSMIUM_INCLUDE_DIR}/osmium/version.hpp. Either your libosmium version is too old, or libosmium wasn't found in the place you said.")
endif()
file(STRINGS "${OSMIUM_INCLUDE_DIR}/osmium/version.hpp" _libosmium_version_define REGEX "#define LIBOSMIUM_VERSION_STRING")
if("${_libosmium_version_define}" MATCHES "#define LIBOSMIUM_VERSION_STRING \"([0-9.]+)\"")
set(_libosmium_version "${CMAKE_MATCH_1}")
......@@ -111,7 +114,7 @@ endif()
if(Osmium_USE_PBF)
find_package(ZLIB)
find_package(Threads)
find_package(Protozero 1.5.1)
find_package(Protozero 1.6.3)
list(APPEND OSMIUM_EXTRA_FIND_VARS ZLIB_FOUND Threads_FOUND PROTOZERO_INCLUDE_DIR)
if(ZLIB_FOUND AND Threads_FOUND AND PROTOZERO_FOUND)
......@@ -324,7 +327,7 @@ if(MSVC)
add_definitions(-DNOMINMAX -DWIN32_LEAN_AND_MEAN -D_CRT_SECURE_NO_WARNINGS)
endif()
if(APPLE)
if(APPLE AND "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
# following only available from cmake 2.8.12:
# add_compile_options(-stdlib=libc++)
# so using this instead:
......
#cmakedefine HAVE_LSEEK64 1
#cmakedefine HAVE_LUA 1
#cmakedefine HAVE_LUAJIT 1
#cmakedefine HAVE_POSIX_FADVISE 1
#cmakedefine HAVE_POSIX_FALLOCATE 1
#cmakedefine HAVE_SYNC_FILE_RANGE 1
......
#include <boost/format.hpp>
#include <cassert>
#include <cstdio>
#include <future>
#include <thread>
#include "db-copy.hpp"
#include "pgsql.hpp"
using fmt = boost::format;
/**
 * Create the copy thread and immediately start its worker loop.
 *
 * The database connection is not opened here but lazily inside the
 * worker thread itself (see worker_thread()/connect()), so construction
 * never blocks on the database.
 */
db_copy_thread_t::db_copy_thread_t(std::string const &conninfo)
: m_conninfo(conninfo), m_conn(nullptr)
{
    m_worker = std::thread([this]() {
        try {
            worker_thread();
        } catch (std::runtime_error const &e) {
            // A failure in the writer thread is fatal for the whole
            // import: report the error and terminate the process.
            fprintf(stderr, "DB writer thread failed due to ERROR: %s\n",
                    e.what());
            exit(2);
        }
    });
}
db_copy_thread_t::~db_copy_thread_t() { finish(); }
/**
 * Hand a command over to the worker thread.
 *
 * Takes ownership of the command. Must not be called once finish()
 * has joined the worker.
 */
void db_copy_thread_t::add_buffer(std::unique_ptr<db_cmd_t> &&buffer)
{
    assert(m_worker.joinable()); // thread must not have been finished

    {
        std::lock_guard<std::mutex> guard(m_queue_mutex);
        m_worker_queue.push_back(std::move(buffer));
    }
    // Wake the worker in case it is waiting on an empty queue.
    m_queue_cond.notify_one();
}
/**
 * Block until the worker has processed every command queued so far.
 *
 * Works by enqueueing a sync command carrying a promise and waiting
 * on the matching future.
 */
void db_copy_thread_t::sync_and_wait()
{
    std::promise<void> checkpoint;
    auto arrived = checkpoint.get_future();

    add_buffer(
        std::unique_ptr<db_cmd_t>(new db_cmd_sync_t(std::move(checkpoint))));

    arrived.wait();
}
/**
 * Shut down the worker thread.
 *
 * Terminates any COPY in flight, sends the finish command and joins
 * the thread. Safe to call more than once.
 */
void db_copy_thread_t::finish()
{
    if (!m_worker.joinable())
        return; // already finished

    finish_copy();
    add_buffer(std::unique_ptr<db_cmd_t>(new db_cmd_finish_t()));
    m_worker.join();
}
/**
 * Main loop of the database writer thread.
 *
 * Opens its own database connection, then pops commands off the shared
 * queue and executes them until a Cmd_finish command is received. Any
 * COPY still in flight is terminated before disconnecting.
 */
void db_copy_thread_t::worker_thread()
{
    connect();

    bool done = false;
    while (!done) {
        std::unique_ptr<db_cmd_t> item;
        {
            std::unique_lock<std::mutex> lock(m_queue_mutex);
            if (m_queue_cond.wait, m_worker_queue.empty()) {
                // Queue drained: sleep until a producer signals new
                // work, then re-check (handles spurious wakeups).
                m_queue_cond.wait(lock);
                continue;
            }

            item = std::move(m_worker_queue.front());
            m_worker_queue.pop_front();
        }
        // Lock released here: command execution must not block producers.

        switch (item->type) {
        case db_cmd_t::Cmd_copy:
            write_to_db(static_cast<db_cmd_copy_t *>(item.get()));
            break;
        case db_cmd_t::Cmd_sync:
            // Flush the current COPY before releasing the waiting
            // producer, so the sync guarantees data reached the DB.
            finish_copy();
            static_cast<db_cmd_sync_t *>(item.get())->barrier.set_value();
            break;
        case db_cmd_t::Cmd_finish:
            done = true;
            break;
        }
    }

    finish_copy();
    disconnect();
}
void db_copy_thread_t::connect()
{
assert(!m_conn);
PGconn *conn = PQconnectdb(m_conninfo.c_str());
if (PQstatus(conn) != CONNECTION_OK)
throw std::runtime_error(
(fmt("Connection to database failed: %1%\n") % PQerrorMessage(conn))
.str());
m_conn = conn;
// Let commits happen faster by delaying when they actually occur.
pgsql_exec_simple(m_conn, PGRES_COMMAND_OK,
"SET synchronous_commit TO off;");
}
/**
 * Close the worker's database connection, if one is open.
 */
void db_copy_thread_t::disconnect()
{
    if (m_conn) {
        PQfinish(m_conn);
        m_conn = nullptr;
    }
}
/**
 * Execute a copy command on the database connection.
 *
 * A COPY already in flight is terminated first when either rows need
 * to be deleted (DELETE cannot run while a COPY is active) or the
 * buffer belongs to a different copy target than the one in flight.
 */
void db_copy_thread_t::write_to_db(db_cmd_copy_t *buffer)
{
    if (!buffer->deletables.empty() ||
        (m_inflight && !buffer->target->same_copy_target(*m_inflight.get())))
        finish_copy();

    if (!buffer->deletables.empty())
        delete_rows(buffer);

    if (!m_inflight)
        start_copy(buffer->target);

    pgsql_CopyData(buffer->target->name.c_str(), m_conn, buffer->buffer);
}
/**
 * Delete the ids listed in the buffer from the target table.
 *
 * Builds a single `DELETE ... WHERE id IN (...)` statement. Must only
 * be called with no COPY in flight and a non-empty deletables list.
 */
void db_copy_thread_t::delete_rows(db_cmd_copy_t *buffer)
{
    assert(!m_inflight);
    // The loop below leaves a trailing ',' which is overwritten with
    // the closing ')'; this only works with at least one id present.
    assert(!buffer->deletables.empty());

    std::string sql = "DELETE FROM ";
    // Rough size estimate: table name + ~15 chars per id + SQL keywords.
    sql.reserve(buffer->target->name.size() + buffer->deletables.size() * 15 +
                30);
    sql += buffer->target->name;
    sql += " WHERE ";
    sql += buffer->target->id;
    sql += " IN (";
    for (auto id : buffer->deletables) {
        sql += std::to_string(id);
        sql += ',';
    }
    sql[sql.size() - 1] = ')';

    pgsql_exec_simple(m_conn, PGRES_COMMAND_OK, sql);
}
/**
 * Start a COPY operation for the given target table.
 *
 * Issues `COPY <table>[(columns)] FROM STDIN` and records the target
 * as in flight.
 */
void db_copy_thread_t::start_copy(std::shared_ptr<db_target_descr_t> const &target)
{
    std::string copystr = "COPY ";
    copystr.reserve(target->name.size() + target->rows.size() + 14);
    copystr += target->name;
    if (!target->rows.empty()) {
        copystr += '(';
        copystr += target->rows;
        copystr += ')';
    }
    copystr += " FROM STDIN";
    pgsql_exec_simple(m_conn, PGRES_COPY_IN, copystr);
    // Mark the target as in flight only after COPY has successfully
    // started. (The original also assigned m_inflight before the exec;
    // that would make finish_copy() try to terminate a COPY that never
    // began if pgsql_exec_simple throws.)
    m_inflight = target;
}
/**
 * Terminate the COPY currently in flight, if any.
 *
 * Sends the end-of-copy marker and checks both the send itself and the
 * final command result for errors before clearing the in-flight state.
 */
void db_copy_thread_t::finish_copy()
{
    if (!m_inflight)
        return;

    if (PQputCopyEnd(m_conn, nullptr) != 1)
        throw std::runtime_error((fmt("stop COPY_END for %1% failed: %2%\n") %
                                  m_inflight->name %
                                  PQerrorMessage(m_conn))
                                     .str());

    // Retrieve the result of the whole COPY to surface server-side errors.
    pg_result_t res(PQgetResult(m_conn));
    if (PQresultStatus(res.get()) != PGRES_COMMAND_OK)
        throw std::runtime_error((fmt("result COPY_END for %1% failed: %2%\n") %
                                  m_inflight->name %
                                  PQerrorMessage(m_conn))
                                     .str());

    m_inflight.reset();
}
/// Create a copy manager that feeds its buffers to the given worker thread.
db_copy_mgr_t::db_copy_mgr_t(std::shared_ptr<db_copy_thread_t> const &processor)
: m_processor(processor)
{}
/**
 * Start a new table row.
 *
 * Keeps filling the current buffer while the target table stays the
 * same; otherwise flushes any pending buffer to the worker and starts
 * a fresh one for the new table.
 */
void db_copy_mgr_t::new_line(std::shared_ptr<db_target_descr_t> const &table)
{
    if (m_current && m_current->target->same_copy_target(*table.get()))
        return; // same target: keep appending to the current buffer

    if (m_current) {
        m_processor->add_buffer(std::move(m_current));
    }

    m_current.reset(new db_cmd_copy_t(table));
}
/**
 * Mark an OSM object for deletion in the current table.
 *
 * new_line() must have been called before, so a copy buffer exists.
 */
void db_copy_mgr_t::delete_id(osmid_t osm_id)
{
    assert(m_current);
    m_current->deletables.push_back(osm_id);
}
/**
 * Synchronize with the worker thread.
 *
 * Flushes the pending buffer (if any) and blocks until the worker has
 * processed every command issued so far.
 */
void db_copy_mgr_t::sync()
{
    // finish any ongoing copy operations
    if (m_current) {
        m_processor->add_buffer(std::move(m_current));
    }

    m_processor->sync_and_wait();
}
/**
 * Finish the current table row.
 *
 * The column writers always leave a trailing '\t' separator; it is
 * replaced here with the '\n' row delimiter. When the buffer is close
 * to capacity it is handed over to the worker thread.
 */
void db_copy_mgr_t::finish_line()
{
    assert(m_current);

    auto &buf = m_current->buffer;
    assert(!buf.empty());

    // Expect that a column has been written last which ended in a '\t'.
    // Replace it with the row delimiter '\n'.
    auto sz = buf.size();
    assert(buf[sz - 1] == '\t');
    buf[sz - 1] = '\n';

    // Keep ~100 bytes of headroom so the next row starts a fresh buffer.
    if (sz > db_cmd_copy_t::Max_buf_size - 100) {
        m_processor->add_buffer(std::move(m_current));
    }
}
#ifndef DB_COPY_HPP
#define DB_COPY_HPP
#include <condition_variable>
#include <deque>
#include <future>
#include <memory>
#include <mutex>
#include <string>
#include <thread>
#include <vector>
#include "osmtypes.hpp"
struct pg_conn;
/**
 * Table information necessary for building SQL queries.
 */
struct db_target_descr_t
{
    /// Name of the target table for the copy operation.
    std::string name;
    /// Comma-separated list of rows for copy operation (when empty: all rows).
    /// (Despite the member name, this is the COPY column list.)
    std::string rows;
    /// Name of id column used when deleting objects.
    std::string id;

    /**
     * Check if the buffer would use exactly the same copy operation.
     */
    bool same_copy_target(db_target_descr_t const &other) const noexcept
    {
        // Identity comparison short-circuits the string comparisons.
        return (this == &other) || (name == other.name && rows == other.rows);
    }

    db_target_descr_t() = default;
    // Note the parameter order: name, id column, then the optional
    // column list (not the declaration order of the members).
    db_target_descr_t(char const *n, char const *i, char const *r = "")
    : name(n), rows(r), id(i)
    {
    }
};
/**
 * A command for the copy thread to execute.
 */
class db_cmd_t
{
public:
    enum cmd_t
    {
        Cmd_copy, ///< Copy buffer content into given target.
        Cmd_sync, ///< Synchronize with parent.
        Cmd_finish ///< Shut down the worker thread.
    };

    virtual ~db_cmd_t() = default;

    // Discriminator for the concrete command type; set once by the
    // protected constructor and used for dispatch in the worker loop.
    cmd_t type;

protected:
    explicit db_cmd_t(cmd_t t)
    : type(t)
    {
    }
};
/// Command carrying a block of COPY data (and optional deletions) for a table.
struct db_cmd_copy_t : public db_cmd_t
{
    /// Size limit for the copy buffer (10MB).
    enum { Max_buf_size = 10 * 1024 * 1024 };
    /// Descriptor of the target table for the copy operation.
    std::shared_ptr<db_target_descr_t> target;
    /// Ids of objects to delete from the target table before copying.
    std::vector<osmid_t> deletables;
    /// actual copy buffer
    std::string buffer;

    explicit db_cmd_copy_t(std::shared_ptr<db_target_descr_t> const &t)
    : db_cmd_t(db_cmd_t::Cmd_copy), target(t)
    {
        // Preallocate the full buffer to avoid reallocations while filling.
        buffer.reserve(Max_buf_size);
    }
};
/// Command asking the worker to flush and then signal the barrier.
struct db_cmd_sync_t : public db_cmd_t
{
    /// Fulfilled by the worker once all prior commands are done.
    std::promise<void> barrier;

    explicit db_cmd_sync_t(std::promise<void> &&b)
    : db_cmd_t(db_cmd_t::Cmd_sync), barrier(std::move(b))
    {
    }
};
/// Command asking the worker thread to terminate its loop.
struct db_cmd_finish_t : public db_cmd_t
{
    db_cmd_finish_t() : db_cmd_t(db_cmd_t::Cmd_finish) {}
};
/**
 * The worker thread that streams copy data into the database.
 */
class db_copy_thread_t
{
public:
    // NOTE(review): single-argument constructor is implicitly
    // convertible from std::string; consider `explicit` — confirm no
    // caller relies on the conversion first.
    db_copy_thread_t(std::string const &conninfo);
    ~db_copy_thread_t();

    /**
     * Add another command for the worker.
     */
    void add_buffer(std::unique_ptr<db_cmd_t> &&buffer);

    /**
     * Send sync command and wait for the notification.
     */
    void sync_and_wait();

    /**
     * Finish the copy process.
     *
     * Only returns when all remaining data has been committed to the
     * database.
     */
    void finish();

private:
    // Entry point of the worker thread; see db-copy.cpp.
    void worker_thread();

    void connect();
    void disconnect();

    void write_to_db(db_cmd_copy_t *buffer);
    void start_copy(std::shared_ptr<db_target_descr_t> const &target);
    void finish_copy();
    void delete_rows(db_cmd_copy_t *buffer);

    /// Connection string; the connection itself is opened by the worker.
    std::string m_conninfo;
    /// Database connection, owned and used by the worker thread.
    pg_conn *m_conn;

    std::thread m_worker;

    /// Protects m_worker_queue; m_queue_cond signals new queue entries.
    std::mutex m_queue_mutex;
    std::condition_variable m_queue_cond;
    std::deque<std::unique_ptr<db_cmd_t>> m_worker_queue;

    // Target for copy operation currently ongoing.
    std::shared_ptr<db_target_descr_t> m_inflight;
};
/**
 * Management class that fills and manages copy buffers.
 */
class db_copy_mgr_t
{
public:
    db_copy_mgr_t(std::shared_ptr<db_copy_thread_t> const &processor);

    /**
     * Start a new table row.
     *
     * Also starts a new buffer if either the table is not the same as
     * the table of currently buffered data or no buffer is pending.
     */
    void new_line(std::shared_ptr<db_target_descr_t> const &table);

    /**
     * Finish a table row.
     *
     * Adds the row delimiter to the buffer. If the buffer is at capacity
     * it will be forwarded to the copy thread.
     */
    void finish_line();

    /**
     * Add many simple columns.
     *
     * See add_column().
     */
    template <typename T, typename ...ARGS>
    void add_columns(T value, ARGS&&... args)
    {
        add_column(value);
        add_columns(args...);
    }

    // Base case of the variadic add_columns() recursion above.
    template <typename T>
    void add_columns(T value)
    {
        add_column(value);
    }

    /**
     * Add a column entry of simple type.
     *
     * Writes the column with the escaping appropriate for the type and
     * a column delimiter.
     */
    template <typename T>
    void add_column(T value)
    {
        add_value(value);
        m_current->buffer += '\t';
    }

    /**
     * Add an empty column.
     *
     * Adds a NULL value for the column.
     */
    void add_null_column() { m_current->buffer += "\\N\t"; }

    /**
     * Start an array column.
     *
     * An array is a list of simple elements of the same type.
     *
     * Must be finished with a call to finish_array().
     */
    void new_array() { m_current->buffer += "{"; }

    /**
     * Add a single value to an array column.
     *
     * Adds the value in the format appropriate for an array and a value
     * separator.
     */
    template <typename T>
    void add_array_elem(T value)
    {
        add_value(value);
        m_current->buffer += ',';
    }

    void add_array_elem(std::string const &s) { add_array_elem(s.c_str()); }

    // String array elements are additionally wrapped in double quotes
    // and use the double-escaped form (see add_escaped_string()).
    void add_array_elem(char const *s)
    {
        assert(m_current);
        m_current->buffer += '"';
        add_escaped_string(s);
        m_current->buffer += "\",";
    }

    /**
     * Finish an array column previously started with new_array().
     *
     * The array may be empty. If it does contain elements, the separator after
     * the final element is replaced with the closing array bracket.
     */
    void finish_array()
    {
        auto idx = m_current->buffer.size() - 1;
        if (m_current->buffer[idx] == '{')
            m_current->buffer += '}';
        else
            m_current->buffer[idx] = '}';
        m_current->buffer += '\t';
    }

    /**
     * Start a hash column.
     *
     * A hash column contains a list of key/value pairs. May be represented
     * by a hstore or json in PostgreSQL.
     *
     * currently a hstore column is written which does not have any start
     * markers.
     *
     * Must be closed with a finish_hash() call.
     */
    void new_hash() { /* nothing */}

    void add_hash_elem(std::string const &k, std::string const &v)
    {
        add_hash_elem(k.c_str(), v.c_str());
    }

    /**
     * Add a key/value pair to a hash column.
     *
     * Key and value must be strings and will be appropriately escaped.
     * A separator for the next pair is added at the end.
     */
    void add_hash_elem(char const *k, char const *v)
    {
        m_current->buffer += '"';
        add_escaped_string(k);
        m_current->buffer += "\"=>\"";
        add_escaped_string(v);
        m_current->buffer += "\",";
    }

    /**
     * Add a key/value pair to a hash column without escaping.
     *
     * Key and value must be strings and will NOT be appropriately escaped.
     * A separator for the next pair is added at the end.
     */
    void add_hash_elem_noescape(char const *k, char const *v)
    {
        m_current->buffer += '"';
        m_current->buffer += k;
        m_current->buffer += "\"=>\"";
        m_current->buffer += v;
        m_current->buffer += "\",";
    }

    /**
     * Add a key (unescaped) and a numeric value to a hash column.
     *
     * Key must be string and come from a safe source because it will NOT be
     * escaped! The value should be convertible using std::to_string.
     * A separator for the next pair is added at the end.
     *
     * This method is suitable to insert safe input, e.g. numeric OSM metadata
     * (eg. uid) but not unsafe input like user names.
     */
    template <typename T>
    void add_hstore_num_noescape(char const *k, T const value)
    {
        m_current->buffer += '"';
        m_current->buffer += k;
        m_current->buffer += "\"=>\"";
        m_current->buffer += std::to_string(value);
        m_current->buffer += "\",";
    }

    /**
     * Close a hash previously started with new_hash().
     *
     * The hash may be empty. If elements were present, the separator
     * of the final element is overwritten with the closing \t.
     */
    void finish_hash()
    {
        // idx underflows on an empty buffer but is never used then,
        // thanks to the short-circuiting empty() check below.
        auto idx = m_current->buffer.size() - 1;
        if (!m_current->buffer.empty() && m_current->buffer[idx] == ',') {
            m_current->buffer[idx] = '\t';
        } else {
            m_current->buffer += '\t';
        }
    }

    /**
     * Add a column with the given WKB geometry in WKB hex format.
     *
     * The geometry is converted on-the-fly from WKB binary to WKB hex.
     */
    void add_hex_geom(std::string const &wkb)
    {
        char const *lookup_hex = "0123456789ABCDEF";

        for (char c : wkb) {
            // Mask with 0xf so the conversion is safe even when char
            // is signed and the byte value is negative.
            m_current->buffer += lookup_hex[(c >> 4) & 0xf];
            m_current->buffer += lookup_hex[c & 0xf];
        }
        m_current->buffer += '\t';
    }

    /**
     * Mark an OSM object for deletion in the current table.
     *
     * The object is guaranteed to be deleted before any lines
     * following the delete_id() are inserted.
     */
    void delete_id(osmid_t osm_id);

    /**
     * Synchronize with worker.
     *
     * Only returns when all previously issued commands are done.
     */
    void sync();

private:
    // Append a numeric value verbatim; no escaping needed for numbers.
    template <typename T>
    void add_value(T value)
    {
        m_current->buffer += std::to_string(value);
    }

    void add_value(double value)
    {
        // 32 bytes is ample for any %g-formatted double.
        char tmp[32];
        snprintf(tmp, sizeof(tmp), "%g", value);
        m_current->buffer += tmp;
    }

    void add_value(std::string const &s) { add_value(s.c_str()); }

    // Append a string escaped for a plain COPY text column
    // (backslash-escapes ", \, and control characters once).
    void add_value(char const *s)
    {
        assert(m_current);
        for (char const *c = s; *c; ++c) {
            switch (*c) {
            case '"':
                m_current->buffer += "\\\"";
                break;
            case '\\':
                m_current->buffer += "\\\\";
                break;
            case '\n':
                m_current->buffer += "\\n";
                break;
            case '\r':
                m_current->buffer += "\\r";
                break;
            case '\t':
                m_current->buffer += "\\t";
                break;
            default:
                m_current->buffer += *c;
                break;
            }
        }
    }

    // Append a string for use inside a quoted array/hstore element:
    // quotes and backslashes get doubled escaping compared with
    // add_value() because the value passes through two parsing layers.
    void add_escaped_string(char const *s)
    {
        for (char const *c = s; *c; ++c) {
            switch (*c) {
            case '"':
                m_current->buffer += "\\\\\"";
                break;
            case '\\':
                m_current->buffer += "\\\\\\\\";
                break;
            case '\n':
                m_current->buffer += "\\n";
                break;
            case '\r':
                m_current->buffer += "\\r";
                break;
            case '\t':
                m_current->buffer += "\\t";
                break;
            default:
                m_current->buffer += *c;
                break;
            }
        }
    }

    /// Worker thread that completed buffers are handed to.
    std::shared_ptr<db_copy_thread_t> m_processor;
    /// Copy buffer currently being filled (null when nothing pending).
    std::unique_ptr<db_cmd_copy_t> m_current;
};
#endif
osm2pgsql (1.00.0~rc1+ds-1~exp1) experimental; urgency=medium
* New upstream release candidate.
* Add patch to fix spelling errors.
-- Bas Couwenberg <sebastic@debian.org> Mon, 19 Aug 2019 05:44:53 +0200
osm2pgsql (0.96.0+ds-3) unstable; urgency=medium
* Bump Standards-Version to 4.4.0, no changes.
* Update watch file to limit matches to archive path.
* Define ACCEPT_USE_OF_DEPRECATED_PROJ_API_H for PROJ 6.0.0 compatibility.
* Update gbp.conf to use --source-only-changes by default.
* Append -DNDEBUG to CXXFLAGS to remove buildpath from binaries.
-- Bas Couwenberg <sebastic@debian.org> Thu, 11 Jul 2019 11:55:16 +0200
osm2pgsql (0.96.0+ds-2) unstable; urgency=medium
* Strip trailing whitespace from changelog, control & rules files.
* Bump Standards-Version to 4.1.5, no changes.
* Drop autopkgtest to test installability.
* Add lintian override for testsuite-autopkgtest-missing.
-- Bas Couwenberg <sebastic@debian.org> Wed, 01 Aug 2018 09:55:45 +0200
osm2pgsql (0.96.0+ds-1) unstable; urgency=medium
* New upstream release.
* Move from experimental to unstable.
-- Bas Couwenberg <sebastic@debian.org> Thu, 03 May 2018 07:09:33 +0200
osm2pgsql (0.96.0~rc1+ds-1~exp1) experimental; urgency=medium
* New upstream release candidate.
......@@ -470,4 +503,3 @@ osm2pgsql (0.08.20071007-1) unstable; urgency=low
* Initial release (Closes: #444705)
-- Andreas Putzo <andreas@putzo.net> Sun, 07 Oct 2007 13:41:33 +0000
......@@ -19,9 +19,9 @@ Build-Depends: debhelper (>= 9),
zlib1g-dev,
liblua5.2-dev,
lua5.2,
python,
python-psycopg2
Standards-Version: 4.1.4
python3,
python3-psycopg2
Standards-Version: 4.4.0
Vcs-Browser: https://salsa.debian.org/debian-gis-team/osm2pgsql
Vcs-Git: https://salsa.debian.org/debian-gis-team/osm2pgsql.git -b experimental
Homepage: https://wiki.openstreetmap.org/wiki/Osm2pgsql
......@@ -36,4 +36,3 @@ Description: OpenStreetMap data to PostgreSQL converter
into a PostgreSQL database with PostGIS geospatial extensions. This
database may then be used to render maps with Mapnik or for other
geospatial analysis.
......@@ -14,3 +14,6 @@ upstream-tag = upstream/%(version)s
# Always use pristine-tar.
pristine-tar = True
[buildpackage]
pbuilder-options = --source-only-changes
spelling-errors.patch
Description: Fix spelling errors.
* Ambigious -> Ambiguous
Author: Bas Couwenberg <sebastic@debian.org>
Forwarded: https://github.com/openstreetmap/osm2pgsql/pull/949
--- a/gazetteer-style.cpp
+++ b/gazetteer-style.cpp
@@ -170,7 +170,7 @@ void gazetteer_style_t::add_style_entry(
// prefix: works on empty key only
if (key[key.size() - 1] == '*') {
if (key.size() == 1) {
- throw std::runtime_error("Style error. Ambigious key '*'.");
+ throw std::runtime_error("Style error. Ambiguous key '*'.");
}
if (!value.empty()) {
throw std::runtime_error(
......@@ -10,8 +10,8 @@ export DEB_BUILD_MAINT_OPTIONS=hardening=+all
# Verbose test output
export VERBOSE=1
CFLAGS += $(CPPFLAGS)
CXXFLAGS += $(CPPFLAGS)
CFLAGS += $(CPPFLAGS) -DACCEPT_USE_OF_DEPRECATED_PROJ_API_H -DNDEBUG
CXXFLAGS += $(CPPFLAGS) -DACCEPT_USE_OF_DEPRECATED_PROJ_API_H -DNDEBUG
%:
dh $@ \
......@@ -31,4 +31,3 @@ override_dh_install:
override_dh_compress:
# clean up some naive file permissions
dh_compress -X.php -X.sql -X.js -X.c -X.h
# Not worth the effort
testsuite-autopkgtest-missing
# Test installability
Depends: @
Test-Command: /bin/true
......@@ -5,4 +5,4 @@ uversionmangle=s/_/./g;s/(\d)[_\.\-\+]?((RC|rc|pre|dev|beta|alpha|b|a)[\-\.]?\d*
filenamemangle=s/(?:.*?\/)?(?:rel|v|osm2pgsql)?[\-\_]?(\d\S+)\.(tgz|tbz|txz|(?:tar\.(?:gz|bz2|xz)))/osm2pgsql-$1.$2/,\
repacksuffix=+ds \
https://github.com/openstreetmap/osm2pgsql/releases \
(?:.*?/)?(?:rel|v|osm2pgsql)?[\-\_]?(\d\S+)\.(?:tgz|tbz|txz|(?:tar\.(?:gz|bz2|xz)))
(?:.*?/archive/)?(?:rel|v|osm2pgsql)?[\-\_]?(\d\S+)\.(?:tgz|tbz|txz|(?:tar\.(?:gz|bz2|xz)))
......@@ -200,12 +200,6 @@ invalid polygons. With this option, invalid polygons are instead simply dropped
from the database. Even without this option, all polygons in the database should
be valid.
.TP
\fB\ \fR\-\-unlogged
Use postgresql's unlogged tables for storing data. This requires PostgreSQL 9.1
or above. Data written to unlogged tables is not written to PostgreSQL's write\-ahead log,
which makes them considerably faster than ordinary tables. However, they are not
crash\-safe: an unlogged table is automatically truncated after a crash or unclean shutdown.
.TP
\fB\ \fR\-\-number\-processes num
Specifies the number of parallel processes used for certain operations. If disks are
fast enough e.g. if you have an SSD, then this can greatly increase speed of
......
......@@ -27,6 +27,15 @@ tables which are used by the pgsql middle layer, not the backend:
With the ``--flat-nodes`` option, the ``planet_osm_nodes`` information is
instead stored in a binary file.
**Note:** The names and structure of these additional tables, colloquially
referred to as "slim tables", are an *internal implementation detail* of
osm2pgsql. While they do not usually change between releases of osm2pgsql,
be advised that if you rely on the content or layout of these tables in
your application, it is your responsibility to check whether your assumptions
are still true in a newer version of osm2pgsql before updating. See
https://github.com/openstreetmap/osm2pgsql/issues/230 for a discussion of
the topic.
## Importing ##
1. Runs a parser on the input file and processes the nodes, ways and relations.
......