Commit 922ca0b7 authored by Philip Hands, committed by Holger Levsen

stop trying to make osuosl167 serve openqa.d.n



Signed-off-by: Holger Levsen <holger@layer-acht.org>
parent f85a502a
# OpenQA settings for apache2. This file is meant to be shared
# between the http and the https vhosts.
#
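# Note: the directives in this include rely on mod_proxy, mod_proxy_http,
# mod_proxy_wstunnel, mod_rewrite, mod_headers and mod_expires being
# enabled (e.g. via a2enmod).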
DocumentRoot /usr/share/openqa/public
<Directory "/usr/share/openqa/public">
AllowOverride None
Require all granted
<Limit GET POST HEAD>
Order Allow,Deny
Allow from all
Deny from env=bad_bot
</Limit>
</Directory>
<Directory "/var/lib/openqa/share/factory">
AllowOverride None
Require all granted
# Need to set explicit type, see
# https://progress.opensuse.org/issues/68362
# https://httpd.apache.org/docs/2.4/mod/mod_mime.html#addtype
AddType application/octet-stream qcow2
<Limit GET POST HEAD>
Order Allow,Deny
Allow from all
Deny from env=bad_bot
</Limit>
</Directory>
Alias /assets "/var/lib/openqa/share/factory"
<Directory "/var/lib/openqa/images">
Options SymLinksIfOwnerMatch
AllowOverride None
Require all granted
# Use of ETag is discouraged when Last-Modified is present
Header unset ETag
FileETag None
# RFC says only cache for 1 year
ExpiresActive On
ExpiresDefault "access plus 1 year"
<Limit GET POST HEAD>
Order Allow,Deny
Allow from all
Deny from env=bad_bot
</Limit>
</Directory>
Alias /image "/var/lib/openqa/images"
<Directory "/var/lib/os-autoinst/tests">
AllowOverride None
Require all granted
<Limit GET POST HEAD>
Order Allow,Deny
Allow from all
Deny from env=bad_bot
</Limit>
</Directory>
<Proxy *>
Require all granted
<Limit GET POST HEAD>
Order Allow,Deny
Allow from all
Deny from env=bad_bot
</Limit>
</Proxy>
ProxyRequests Off
ProxyPreserveHost On
ProxyPass /robots.txt !
ProxyPass /favicon.ico !
ProxyPass /images !
ProxyPass /image !
ProxyPass /javascripts !
ProxyPass /stylesheets !
ProxyPass /assets !
ProxyPass /error !
# ensure websocket connections are handled as such by the reverse proxy while
# still being able to pass regular HTTP connections through it
RewriteEngine On
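# the [P] flag proxies the rewritten ws:// URL through mod_proxy (handled
# by mod_proxy_wstunnel); [L] stops processing further rewrite rules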
RewriteCond %{HTTP:Connection} Upgrade [NC]
RewriteCond %{HTTP:Upgrade} websocket [NC]
RewriteRule /api/v1/ws/(.*) ws://localhost:9527/ws/$1 [P,L]
RewriteCond %{HTTP:Connection} Upgrade [NC]
RewriteCond %{HTTP:Upgrade} websocket [NC]
RewriteRule /liveviewhandler/(.*) ws://localhost:9528/liveviewhandler/$1 [P,L]
# pass requests for the websocket server (which the workers connect to) to port 9527
ProxyPass "/api/v1/ws/" "http://localhost:9527/ws/" keepalive=On
# pass requests for the websocket server handling the live view to port 9528
ProxyPass "/liveviewhandler/" "http://localhost:9528/liveviewhandler/" keepalive=On
ProxyPass / http://localhost:9526/ keepalive=On
ProxyPassReverse / http://localhost:9526/
<If "%{HTTPS} == 'on'">
RequestHeader set X-Forwarded-HTTPS "1"
RequestHeader set X-Forwarded-Proto "https"
</If>
HostnameLookups Off
UseCanonicalName Off
ServerSignature On
# TODO: try mojo 7.39 - https://github.com/kraih/mojo/commit/f9ff45e48f606
SetEnv proxy-nokeepalive 1
# Increase the timeout from the default of 60s to 300s to ensure that large
# requests can finish without proxy timeout problems.
# This value should be enough, but may be more than necessary; more investigation is needed.
ProxyTimeout 300
<Macro https-redirect $name>
<VirtualHost *:80>
ServerName $name
ServerAdmin holger@layer-acht.org
Redirect permanent / https://$name/
</VirtualHost>
</Macro>
<Macro common-directives-ssl $name>
SSLEngine on
SSLCertificateKeyFile /var/lib/dehydrated/certs/$name/privkey.pem
SSLCertificateFile /var/lib/dehydrated/certs/$name/fullchain.pem
SSLCipherSuite HIGH:!aNULL:!eNULL:!EXP:!LOW:!MD5
SSLHonorCipherOrder on
Header always add Strict-Transport-Security "max-age=15552000"
</Macro>
<Macro block-bots>
# block bad bots with a 403
SetEnvIf User-Agent "AhrefsBot" bad_bot
SetEnvIf User-Agent "Baiduspider" bad_bot
SetEnvIf User-Agent "bingbot" bad_bot
SetEnvIf User-Agent "DotBot" bad_bot
SetEnvIf User-Agent "Exabot" bad_bot
SetEnvIf User-Agent "Experibot" bad_bot
SetEnvIf User-Agent "facebookexternalhit" bad_bot
SetEnvIf User-Agent "FlipboardProxy" bad_bot
SetEnvIf User-Agent "Googlebot" bad_bot
SetEnvIf User-Agent "ltx71" bad_bot
SetEnvIf User-Agent "mediawords" bad_bot
SetEnvIf User-Agent "MetaURI" bad_bot
SetEnvIf User-Agent "MJ12bot" bad_bot
SetEnvIf User-Agent "SemrushBot" bad_bot
SetEnvIf User-Agent "Slackbot" bad_bot
SetEnvIf User-Agent "Sogou" bad_bot
SetEnvIf User-Agent "Twitterbot" bad_bot
SetEnvIf User-Agent "yacybot" bad_bot
SetEnvIf User-Agent "ZoomBot" bad_bot
</Macro>
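# note: mod_macro only expands a macro where it is instantiated, so a vhost
# needs a corresponding "Use block-bots" for the bad_bot variable (checked
# in the <Limit> blocks above) to be set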
<Macro common-directives $name>
ServerName $name
ServerAdmin holger@layer-acht.org
ErrorLog ${APACHE_LOG_DIR}/error.log
# Possible values include: debug, info, notice, warn, error, crit,
# alert, emerg.
LogLevel warn
CustomLog ${APACHE_LOG_DIR}/access.log combined
</Macro>
Use https-redirect openqa.debian.net
<VirtualHost *:443>
Use common-directives openqa.debian.net
Use common-directives-ssl openqa.debian.net
Include /etc/apache2/sites-available/openqa-common.inc
Include /etc/dehydrated/apache.conf
</VirtualHost>
[global]
## Web site name for tab titles and bookmarks
#appname = openQA
## type of branding - [ openSUSE, plain, openqa.suse.de ]
branding = plain
## type and location of needle repo
scm = git
## space-separated list of domains from which asset download with
## _URL params is allowed; matched against the end of the hostname in
## the URL. With e.g. 'fedoraproject.org opensuse.org', downloads from
## opensuse.org, dl.fedoraproject.org and a.b.c.opensuse.org are allowed;
## downloads from moo.net, dl.opensuse and fedoraproject.org.evil are not.
## The default is undefined, meaning asset download is *disabled*; you
## must set this option to enable it
download_domains = cdimage.debian.org archive.kali.org
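## e.g. with the value above, a job setting such as
## ISO_URL=https://cdimage.debian.org/some/path/image.iso would be allowed
## (hypothetical URL, shown only to illustrate the hostname suffix match)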
## set if you have a local repo mirror
#suse_mirror = http://FIXME
## base url [default: $self->req->url->base]
#base_url = http://FIXME
# days for Strict-Transport-Security, 0 to not add this header
# http://en.wikipedia.org/wiki/Strict-Transport-Security
# hsts = 365
## Set to 0 to disable auditing backend
# audit_enabled = 1
## Set to 1 to enable profiling
## * Needs Mojolicious::Plugin::NYTProf
## * Profiling data will be PUBLICLY accessible under /nytprof route.
## * The plugin impairs performance and the generated profiling data might quickly
## utilize a lot of disk space. So don't enable this plugin in production.
# profiling_enabled = 0
## Set to 1 to enable monitoring
## * Needs Mojolicious::Plugin::Status
## * Monitoring will be accessible to operators and admins under /monitoring route.
## * The plugin can impair performance significantly with prefork workers enabled.
## So don't enable this plugin in production.
# monitoring_enabled = 0
## space-separated list of extra plugins to load; each plugin must live under
## OpenQA::WebAPI::Plugin and its correctly-cased module name is given here;
## this example loads OpenQA::WebAPI::Plugin::AMQP
#plugins = AMQP
## space-separated list of asset types *not* to show links for in the
# web UI. Default is 'repo'
#hide_asset_types = repo iso
## recognized_referers contains a space-separated list of hostnames. When
# openQA detects (via the 'Referer' header) that a test job was accessed from
# one of these domains, the job is labeled as linked and considered important
# recognized_referers = bugzilla.suse.com bugzilla.opensuse.org bugzilla.novell.com bugzilla.microfocus.com progress.opensuse.org github.com
recognized_referers = bugs.debian.org salsa.debian.org gitlab.com github.com
## A regex in a string of test settings to ignore for "job investigation"
#job_investigate_ignore = "(JOBTOKEN|NAME)"
# Specify the number of seconds until an unresponsive worker is considered offline
# and its currently assigned jobs are taken care of by the stale job detection
#worker_timeout = 1800
## Timeout for the git command in "job investigation"
#job_investigate_git_timeout = 20
## Gracefully restart the prefork workers if they reach a certain memory limit (in kB)
#max_rss_limit = 180000
## Causes jobs reported as incomplete by the worker to be cloned automatically when the
## reason matches; set to 0 to disable
#auto_clone_regex = ^(cache failure|terminated prematurely):
[scm git]
# name of the remote to fetch updates from before committing changes (e.g. origin; leave commented out to disable the remote update)
update_remote = salsa
# name of the branch to rebase against before committing changes (e.g. origin/master; leave commented out to disable the rebase)
update_branch = salsa/debian
# whether committed changes should be pushed
do_push = yes
## Authentication method to use for user management
[auth]
# method = Fake|OpenID|OAuth2
method = OAuth2
# for salsa.debian.org one can use:
[oauth2]
provider = debian_salsa
key = 7c9be60a197093e400a2d1a13f153df5e76cc734734a50f32f0a7fb0148b1a82
secret = 705c78dd46c3620b4b624a9ba3c93edbeb345c5484cf73e8d144e42ac00ed8e7
# alternatively, one can specify everything in this file,
# with no magic provider name like 'debian_salsa' or 'github'
#[oauth2]
#provider = custom
#key = ...
#secret = ...
#authorize_url = https://salsa.debian.org/oauth/authorize?response_type=code
#token_url = https://salsa.debian.org/oauth/token
#user_url = https://salsa.debian.org/api/v4/user
#token_scope = read_user
#token_label = Bearer
#nickname_from = username
[logging]
# logging goes to stderr (and thus the systemd journal) by default
# if you use a file, remember the AppArmor profile!
#file = /var/log/openqa
#level = debug
#sql_debug = true
## Configuration for OpenID auth method
[openid]
## base url for openid provider
#provider = https://www.opensuse.org/openid/user/
## enforce redirect back to https
httpsonly = 1
## Configuration for auditing plugin
[audit]
# disable auditing of chatty events by default
blocklist = job_grab job_done
# Sets the storage duration in days for the different audit event types
[audit/storage_duration]
# By default cleanup is disabled, see http://open.qa/docs/#_auditing_tracking_openqa_changes
# The following categories with example values can be uncommented as needed
startup = 10
jobgroup = 365
jobtemplate = 365
table = 365
iso = 60
user = 60
asset = 30
needle = 30
other = 15
## Configuration for AMQP plugin
[amqp]
#heartbeat_timeout = 60
#reconnect_timeout = 5
# guest/guest is the default anonymous user/pass for RabbitMQ
#url = amqp://guest:guest@localhost:5672/
#exchange = pubsub
#topic_prefix = suse
# Default limits for cleanup (sizes are in GiB, durations in days, zero denotes infinity)
[default_group_limits]
asset_size_limit = 50 # only used on job group level (parent groups have no default)
log_storage_duration = 30
important_log_storage_duration = 120
#result_storage_duration = 365
result_storage_duration = 90
important_result_storage_duration = 0
[misc_limits]
#untracked_assets_storage_duration = 14
# Specify the screenshot ID range to query at once from the database (reduce to avoid big queries, increase to lower query overhead)
#screenshot_cleanup_batch_size = 200000
# Specify the number of screenshot ID ranges (with a size as configured by screenshot_cleanup_batch_size) to process in a single Minion
# job (reduce to avoid Minion jobs from running very long and possibly being interrupted, increase to reduce the number of Minion jobs)
#screenshot_cleanup_batches_per_minion_job = 450
# Extends the job result cleanup to ensure the partition results are stored on does not become too full
# (still experimental, relies on df)
#results_min_free_disk_space_percentage = 0
[job_settings_ui]
# Specify the keys of job settings which reference a file and should therefore be rendered
# as links to those files within the job settings tab.
# Directories should be under the `CASEDIR` root path or under the `data` folder of the `CASEDIR`. The `data`
# folder is used as the default, but this can be configured to cover the needs of any test distribution. To change
# it, add the `default_data_dir` variable with the name of the directory.
#keys_to_render_as_links=YAML_SCHEDULE,AUTOYAST
[hooks]
# Specify custom hook scripts in the format `job_done_hook_$result` to be called
# when a job is done. Any executable specified in the variable, either as an
# absolute path or as an executable name in `$PATH`, is called with the job ID as
# its first and only parameter when the job finishes with the matching `$result`,
# for example:
#job_done_hook_failed = my-openqa-hook-failed
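## A minimal sketch of such a hook script (hypothetical name; assumes the
## openqa-client package is installed, so openqa-cli is available):
##
##   #!/bin/sh
##   # openQA calls the hook with the job ID as the only argument
##   job_id="$1"
##   openqa-cli api -X POST "jobs/$job_id/comments" text="flagged by failure hook"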
# Configuration for InfluxDB routes
[influxdb]
# Specify Minion task names which should never be counted towards the total number of failed Minion jobs.
#ignored_failed_minion_jobs = influxdb-minion-fail-job-task-name
@@ -523,13 +523,9 @@ if [ -f /etc/debian_version ] ; then
     if [ "$HOSTNAME" = "osuosl167-amd64" ]; then
         # for phil only
         DEBS="$DEBS postfix-pcre"
-        # OpenQA -- currently installed with help from:
+        # OpenQA worker -- currently installed with help from:
         # hosts/osuosl167-amd64/etc/apt/sources.list.d/philh-openqa.list
         DEBS="$DEBS
-            openqa
-            openqa-client
-            openqa-common
-            openqa-doc
             openqa-worker
             "
         # only needed on the main node
@@ -716,11 +712,6 @@ if [ "$HOSTNAME" = "jenkins" ] || [ "$HOSTNAME" = "ionos7-amd64" ] ; then
         sudo a2ensite -q buildinfos.debian.net
         sudo chown jenkins-adm.jenkins-adm /etc/apache2/sites-enabled/buildinfos.debian.net.conf
         ;;
-    osuosl167-amd64)
-        [ ! -e /etc/apache2/mods-enabled/expires.load ] || sudo a2enmod expires
-        sudo a2ensite -q openqa.debian.net
-        sudo chown jenkins-adm.jenkins-adm /etc/apache2/sites-enabled/openqa.debian.net.conf
-        ;;
     esac
     # for reproducible.d.n url rewriting:
     [ -L /var/www/userContent ] || sudo ln -sf /var/lib/jenkins/userContent /var/www/userContent
@@ -920,6 +911,12 @@ EOF
     fi
 fi
+# on openqa workers, need a cron job to do this regularly (or something less crap than a cron job, perhaps)
+# dist=debian email=openqa@osuosl167-amd64.debian.net giturl=https://salsa.debian.org/philh/openqa-tests-debian.git branch=needleless needles_giturl=https://salsa.debian.org/philh/openqa-tests-debian.git needles_branch=debian-needles sh -x script/fetchneedles
+# also: sudo systemctl enable openqa-worker@1.service
+# possibly also: sudo systemctl start openqa-worker@1.service
 #
 # almost finally…
 #
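A sketch of the cron job hinted at above (purely illustrative: the schedule,
the user and the installation path are assumptions, not part of this commit;
the environment variables are copied from the comment in the hunk):

    # /etc/cron.d/openqa-fetchneedles -- refresh the tests/needles checkouts twice a day
    0 */12 * * * root dist=debian giturl=https://salsa.debian.org/philh/openqa-tests-debian.git branch=needleless needles_giturl=https://salsa.debian.org/philh/openqa-tests-debian.git needles_branch=debian-needles sh /usr/share/openqa/script/fetchneedles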