Update docker stuff and bug fixes

Khanh Ngo 2019-12-04 11:50:46 +07:00
parent 8ea00b9484
commit 840e2a4750
37 changed files with 444 additions and 1082 deletions

.dockerignore (new file, 112 lines)

@ -0,0 +1,112 @@
### OSX ###
*.DS_Store
.AppleDouble
.LSOverride
# Icon must end with two \r
Icon
# Thumbnails
._*
# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk
### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
.pytest_cache/
nosetests.xml
coverage.xml
*.cover
.hypothesis/
# Translations
*.mo
*.pot
# Flask stuff:
flask/
instance/settings.py
.webassets-cache
# Scrapy stuff:
.scrapy
# celery beat schedule file
celerybeat-schedule.*
# Node
node_modules
npm-debug.log
# Docker
Dockerfile*
docker-compose*
.dockerignore
# Git
.git
.gitattributes
.gitignore
# Vscode
.vscode
*.code-workspace
# Others
.lgtm.yml
.travis.yml

.env (deleted, 16 lines)

@ -1,16 +0,0 @@
ENVIRONMENT=development
PDA_DB_HOST=powerdns-admin-mysql
PDA_DB_NAME=powerdns_admin
PDA_DB_USER=powerdns_admin
PDA_DB_PASSWORD=changeme
PDA_DB_PORT=3306
PDNS_DB_HOST=pdns-mysql
PDNS_DB_NAME=pdns
PDNS_DB_USER=pdns
PDNS_DB_PASSWORD=changeme
PDNS_HOST=pdns-server
PDNS_API_KEY=changeme
PDNS_WEBSERVER_ALLOW_FROM=0.0.0.0

.gitignore (vendored, 17 lines changed)

@ -25,22 +25,17 @@ nosetests.xml
flask
config.py
configs/production.py
logfile.log
settings.json
advanced_settings.json
idp.crt
log.txt
db_repository/*
upload/avatar/*
tmp/*
.ropeproject
.sonarlint/*
pdns.db
idp.crt
*.bak
db_repository/*
tmp/*
node_modules
.webassets-cache
app/static/generated
.webassets-cache
.venv*
.pytest_cache


@ -1 +1 @@
--*.modules-folder "./app/static/node_modules"
--*.modules-folder "./powerdnsadmin/static/node_modules"

README.md (147 lines changed)

@ -38,144 +38,11 @@ You can now access PowerDNS-Admin at url http://localhost:9191
**NOTE:** For other methods to run PowerDNS-Admin, please take a look at the wiki pages.
## Build production docker container image
```
$ docker build -t powerdns-admin:latest -f docker/Production/Dockerfile .
```
### Screenshots
![dashboard](https://user-images.githubusercontent.com/6447444/44068603-0d2d81f6-9fa5-11e8-83af-14e2ad79e370.png)
### Running tests
**NOTE:** Tests will create `__pycache__` folders owned by root, which can cause problems during a rebuild (e.g. an "invalid tar headers" message). When that happens, remove those folders as root.
1. Build images
```
docker-compose -f docker-compose-test.yml build
```
2. Run tests
```
docker-compose -f docker-compose-test.yml up
```
3. Rerun tests
To tear down the previous environment:
```
docker-compose -f docker-compose-test.yml down
```
then run the tests again:
```
docker-compose -f docker-compose-test.yml up
```
### API Usage
1. Run the Docker image with `docker-compose up`, then open the UI at http://localhost:9191. The Swagger API specification is available at http://localhost:9191/swagger.
2. Click register and create a user, e.g. username: admin and password: admin.
3. Log in to the UI and, in the settings, enable allow domain creation for users;
now you can create and manage domains with the admin account and also with ordinary users.
4. Encode your username and password in base64. In our example the user is admin and the password is admin, so on a Linux command line we type:
```
someuser@somehost:~$echo -n 'admin:admin'|base64
YWRtaW46YWRtaW4=
```
We use the generated output for basic authentication. Authenticated this way, we can create/delete/get zones and create/delete/get/update API keys.
Creating a domain:
```
curl -L -vvv -H 'Content-Type: application/json' -H 'Authorization: Basic YWRtaW46YWRtaW4=' -X POST http://localhost:9191/api/v1/pdnsadmin/zones --data '{"name": "yourdomain.com.", "kind": "NATIVE", "nameservers": ["ns1.mydomain.com."]}'
```
Creating an API key with the Administrator role (an API key can also have the User role; when creating such a key, you must also specify the domains for which it is valid):
```
curl -L -vvv -H 'Content-Type: application/json' -H 'Authorization: Basic YWRtaW46YWRtaW4=' -X POST http://localhost:9191/api/v1/pdnsadmin/apikeys --data '{"description": "masterkey","domains":[], "role": "Administrator"}'
```
The call above will return a response like this:
```
[{"description": "samekey", "domains": [], "role": {"name": "Administrator", "id": 1}, "id": 2, "plain_key": "aGCthP3KLAeyjZI"}]
```
We take `plain_key` and base64-encode it; this is the only time the API key is available in plain text, so save it somewhere:
```
someuser@somehost:~$echo -n 'aGCthP3KLAeyjZI'|base64
YUdDdGhQM0tMQWV5alpJ
```
We can use the API key for all calls specified in our API specification (it tries to follow the PowerDNS API 1:1; only the tsigkeys endpoints are not yet implemented). Don't forget to specify Content-Type!
Getting the PowerDNS configuration:
```
curl -L -vvv -H 'Content-Type: application/json' -H 'X-API-KEY: YUdDdGhQM0tMQWV5alpJ' -X GET http://localhost:9191/api/v1/servers/localhost/config
```
creating and updating records:
```
curl -X PATCH -H 'Content-Type: application/json' --data '{"rrsets": [{"name": "test1.yourdomain.com.","type": "A","ttl": 86400,"changetype": "REPLACE","records": [ {"content": "192.0.2.5", "disabled": false} ]},{"name": "test2.yourdomain.com.","type": "AAAA","ttl": 86400,"changetype": "REPLACE","records": [ {"content": "2001:db8::6", "disabled": false} ]}]}' -H 'X-API-Key: YUdDdGhQM0tMQWV5alpJ' http://127.0.0.1:9191/api/v1/servers/localhost/zones/yourdomain.com.
```
getting domain:
```
curl -L -vvv -H 'Content-Type: application/json' -H 'X-API-KEY: YUdDdGhQM0tMQWV5alpJ' -X GET http://localhost:9191/api/v1/servers/localhost/zones/yourdomain.com
```
list zone records:
```
curl -H 'Content-Type: application/json' -H 'X-API-Key: YUdDdGhQM0tMQWV5alpJ' http://localhost:9191/api/v1/servers/localhost/zones/yourdomain.com
```
add new record:
```
curl -H 'Content-Type: application/json' -X PATCH --data '{"rrsets": [ {"name": "test.yourdomain.com.", "type": "A", "ttl": 86400, "changetype": "REPLACE", "records": [ {"content": "192.0.5.4", "disabled": false } ] } ] }' -H 'X-API-Key: YUdDdGhQM0tMQWV5alpJ' http://localhost:9191/api/v1/servers/localhost/zones/yourdomain.com | jq .
```
update record:
```
curl -H 'Content-Type: application/json' -X PATCH --data '{"rrsets": [ {"name": "test.yourdomain.com.", "type": "A", "ttl": 86400, "changetype": "REPLACE", "records": [ {"content": "192.0.2.5", "disabled": false, "name": "test.yourdomain.com.", "ttl": 86400, "type": "A"}]}]}' -H 'X-API-Key: YUdDdGhQM0tMQWV5alpJ' http://localhost:9191/api/v1/servers/localhost/zones/yourdomain.com | jq .
```
delete record:
```
curl -H 'Content-Type: application/json' -X PATCH --data '{"rrsets": [ {"name": "test.yourdomain.com.", "type": "A", "ttl": 86400, "changetype": "DELETE"}]}' -H 'X-API-Key: YUdDdGhQM0tMQWV5alpJ' http://localhost:9191/api/v1/servers/localhost/zones/yourdomain.com | jq
```
### Generate ER diagram
```
apt-get install python-dev graphviz libgraphviz-dev pkg-config
```
```
pip install graphviz mysqlclient ERAlchemy
```
```
docker-compose up -d
```
```
source .env
```
```
eralchemy -i 'mysql://${PDA_DB_USER}:${PDA_DB_PASSWORD}@'$(docker inspect powerdns-admin-mysql|jq -jr '.[0].NetworkSettings.Networks.powerdnsadmin_default.IPAddress')':3306/powerdns_admin' -o /tmp/output.pdf
```
![dashboard](https://user-images.githubusercontent.com/6447444/44068603-0d2d81f6-9fa5-11e8-83af-14e2ad79e370.png)


@ -1,133 +0,0 @@
import os
basedir = os.path.abspath(os.path.dirname(__file__))
# BASIC APP CONFIG
SECRET_KEY = 'We are the world'
BIND_ADDRESS = '127.0.0.1'
PORT = 9191
# TIMEOUT - for large zones
TIMEOUT = 10
# LOG CONFIG
# - For docker, LOG_FILE=''
LOG_LEVEL = 'DEBUG'
LOG_FILE = 'logfile.log'
SALT = '$2b$12$yLUMTIfl21FKJQpTkRQXCu'
# UPLOAD DIRECTORY
UPLOAD_DIR = os.path.join(basedir, 'upload')
# DATABASE CONFIG
SQLA_DB_USER = 'pda'
SQLA_DB_PASSWORD = 'changeme'
SQLA_DB_HOST = '127.0.0.1'
SQLA_DB_PORT = 3306
SQLA_DB_NAME = 'pda'
SQLALCHEMY_TRACK_MODIFICATIONS = True
# DATABASE - MySQL
SQLALCHEMY_DATABASE_URI = 'mysql://'+SQLA_DB_USER+':'+SQLA_DB_PASSWORD+'@'+SQLA_DB_HOST+':'+str(SQLA_DB_PORT)+'/'+SQLA_DB_NAME
# DATABASE - SQLite
# SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'pdns.db')
# SAML Authentication
SAML_ENABLED = False
SAML_DEBUG = True
SAML_PATH = os.path.join(os.path.dirname(__file__), 'saml')
##Example for ADFS Metadata-URL
SAML_METADATA_URL = 'https://<hostname>/FederationMetadata/2007-06/FederationMetadata.xml'
#Cache Lifetime in Seconds
SAML_METADATA_CACHE_LIFETIME = 1
# SAML SSO binding format to use
## Default: library default (urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect)
#SAML_IDP_SSO_BINDING = 'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST'
## EntityID of the IdP to use. Only needed if more than one IdP is
## in the SAML_METADATA_URL
### Default: First (only) IdP in the SAML_METADATA_URL
### Example: https://idp.example.edu/idp
#SAML_IDP_ENTITY_ID = 'https://idp.example.edu/idp'
## NameID format to request
### Default: The SAML NameID Format in the metadata if present,
### otherwise urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified
### Example: urn:oid:0.9.2342.19200300.100.1.1
#SAML_NAMEID_FORMAT = 'urn:oid:0.9.2342.19200300.100.1.1'
## Attribute to use for Email address
### Default: email
### Example: urn:oid:0.9.2342.19200300.100.1.3
#SAML_ATTRIBUTE_EMAIL = 'urn:oid:0.9.2342.19200300.100.1.3'
## Attribute to use for Given name
### Default: givenname
### Example: urn:oid:2.5.4.42
#SAML_ATTRIBUTE_GIVENNAME = 'urn:oid:2.5.4.42'
## Attribute to use for Surname
### Default: surname
### Example: urn:oid:2.5.4.4
#SAML_ATTRIBUTE_SURNAME = 'urn:oid:2.5.4.4'
## Split into Given name and Surname
## Useful if your IDP only gives a display name
### Default: none
### Example: http://schemas.microsoft.com/identity/claims/displayname
#SAML_ATTRIBUTE_NAME = 'http://schemas.microsoft.com/identity/claims/displayname'
## Attribute to use for username
### Default: Use NameID instead
### Example: urn:oid:0.9.2342.19200300.100.1.1
#SAML_ATTRIBUTE_USERNAME = 'urn:oid:0.9.2342.19200300.100.1.1'
## Attribute to get admin status from
### Default: Don't control admin with SAML attribute
### Example: https://example.edu/pdns-admin
### If set, look for the value 'true' to set a user as an administrator
### If not included in assertion, or set to something other than 'true',
### the user is set as a non-administrator user.
#SAML_ATTRIBUTE_ADMIN = 'https://example.edu/pdns-admin'
## Attribute to get group from
### Default: Don't use groups from SAML attribute
### Example: https://example.edu/pdns-admin-group
#SAML_ATTRIBUTE_GROUP = 'https://example.edu/pdns-admin'
## Group namem to get admin status from
### Default: Don't control admin with SAML group
### Example: https://example.edu/pdns-admin
#SAML_GROUP_ADMIN_NAME = 'powerdns-admin'
## Attribute to get group to account mappings from
### Default: None
### If set, the user will be added and removed from accounts to match
### what's in the login assertion if they are in the required group
#SAML_GROUP_TO_ACCOUNT_MAPPING = 'dev-admins=dev,prod-admins=prod'
## Attribute to get account names from
### Default: Don't control accounts with SAML attribute
### If set, the user will be added and removed from accounts to match
### what's in the login assertion. Accounts that don't exist will
### be created and the user added to them.
SAML_ATTRIBUTE_ACCOUNT = 'https://example.edu/pdns-account'
SAML_SP_ENTITY_ID = 'http://<SAML SP Entity ID>'
SAML_SP_CONTACT_NAME = '<contact name>'
SAML_SP_CONTACT_MAIL = '<contact mail>'
#Configures if SAML tokens should be encrypted.
#If enabled a new app certificate will be generated on restart
SAML_SIGN_REQUEST = False
# Configures if you want to request the IDP to sign the message
# Default is True
#SAML_WANT_MESSAGE_SIGNED = True
#Use SAML standard logout mechanism retrieved from idp metadata
#If configured false don't care about SAML session on logout.
#Logout from PowerDNS-Admin only and keep SAML session authenticated.
SAML_LOGOUT = False
#Configure to redirect to a different url then PowerDNS-Admin login after SAML logout
#for example redirect to google.com after successful saml logout
#SAML_LOGOUT_URL = 'https://google.com'


@ -1,124 +1,94 @@
import os
basedir = os.path.abspath(os.path.dirname(__file__))
basedir = os.path.abspath(os.path.abspath(os.path.dirname(__file__)))
# BASIC APP CONFIG
SECRET_KEY = 'changeme'
LOG_LEVEL = 'DEBUG'
LOG_FILE = os.path.join(basedir, 'logs/log.txt')
### BASIC APP CONFIG
SALT = '$2b$12$yLUMTIfl21FKJQpTkRQXCu'
# TIMEOUT - for large zones
TIMEOUT = 10
SECRET_KEY = 'e951e5a1f4b94151b360f47edf596dd2'
BIND_ADDRESS = '0.0.0.0'
PORT = 9191
# UPLOAD DIR
UPLOAD_DIR = os.path.join(basedir, 'upload')
# DATABASE CONFIG FOR MYSQL
DB_HOST = os.environ.get('PDA_DB_HOST')
DB_PORT = os.environ.get('PDA_DB_PORT', 3306 )
DB_NAME = os.environ.get('PDA_DB_NAME')
DB_USER = os.environ.get('PDA_DB_USER')
DB_PASSWORD = os.environ.get('PDA_DB_PASSWORD')
#MySQL
SQLALCHEMY_DATABASE_URI = 'mysql://'+DB_USER+':'+DB_PASSWORD+'@'+DB_HOST+':'+ str(DB_PORT) + '/'+DB_NAME
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')
### DATABASE CONFIG
SQLA_DB_USER = 'pda'
SQLA_DB_PASSWORD = 'changeme'
SQLA_DB_HOST = '127.0.0.1'
SQLA_DB_NAME = 'pda'
SQLALCHEMY_TRACK_MODIFICATIONS = True
# SAML Authentication
### DATBASE - MySQL
# SQLALCHEMY_DATABASE_URI = 'mysql://' + SQLA_DB_USER + ':' + SQLA_DB_PASSWORD + '@' + SQLA_DB_HOST + '/' + SQLA_DB_NAME
### DATABSE - SQLite
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'pdns.db')
# SAML Authnetication
SAML_ENABLED = False
SAML_DEBUG = True
SAML_PATH = os.path.join(os.path.dirname(__file__), 'saml')
##Example for ADFS Metadata-URL
SAML_METADATA_URL = 'https://<hostname>/FederationMetadata/2007-06/FederationMetadata.xml'
#Cache Lifetime in Seconds
SAML_METADATA_CACHE_LIFETIME = 1
# SAML_DEBUG = True
# SAML_PATH = os.path.join(os.path.dirname(__file__), 'saml')
# ##Example for ADFS Metadata-URL
# SAML_METADATA_URL = 'https://<hostname>/FederationMetadata/2007-06/FederationMetadata.xml'
# #Cache Lifetime in Seconds
# SAML_METADATA_CACHE_LIFETIME = 1
# SAML SSO binding format to use
## Default: library default (urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect)
#SAML_IDP_SSO_BINDING = 'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST'
# # SAML SSO binding format to use
# ## Default: library default (urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect)
# #SAML_IDP_SSO_BINDING = 'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST'
## EntityID of the IdP to use. Only needed if more than one IdP is
## in the SAML_METADATA_URL
### Default: First (only) IdP in the SAML_METADATA_URL
### Example: https://idp.example.edu/idp
#SAML_IDP_ENTITY_ID = 'https://idp.example.edu/idp'
## NameID format to request
### Default: The SAML NameID Format in the metadata if present,
### otherwise urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified
### Example: urn:oid:0.9.2342.19200300.100.1.1
#SAML_NAMEID_FORMAT = 'urn:oid:0.9.2342.19200300.100.1.1'
# ## EntityID of the IdP to use. Only needed if more than one IdP is
# ## in the SAML_METADATA_URL
# ### Default: First (only) IdP in the SAML_METADATA_URL
# ### Example: https://idp.example.edu/idp
# #SAML_IDP_ENTITY_ID = 'https://idp.example.edu/idp'
# ## NameID format to request
# ### Default: The SAML NameID Format in the metadata if present,
# ### otherwise urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified
# ### Example: urn:oid:0.9.2342.19200300.100.1.1
# #SAML_NAMEID_FORMAT = 'urn:oid:0.9.2342.19200300.100.1.1'
## Attribute to use for Email address
### Default: email
### Example: urn:oid:0.9.2342.19200300.100.1.3
#SAML_ATTRIBUTE_EMAIL = 'urn:oid:0.9.2342.19200300.100.1.3'
# ## Attribute to use for Email address
# ### Default: email
# ### Example: urn:oid:0.9.2342.19200300.100.1.3
# #SAML_ATTRIBUTE_EMAIL = 'urn:oid:0.9.2342.19200300.100.1.3'
## Attribute to use for Given name
### Default: givenname
### Example: urn:oid:2.5.4.42
#SAML_ATTRIBUTE_GIVENNAME = 'urn:oid:2.5.4.42'
# ## Attribute to use for Given name
# ### Default: givenname
# ### Example: urn:oid:2.5.4.42
# #SAML_ATTRIBUTE_GIVENNAME = 'urn:oid:2.5.4.42'
## Attribute to use for Surname
### Default: surname
### Example: urn:oid:2.5.4.4
#SAML_ATTRIBUTE_SURNAME = 'urn:oid:2.5.4.4'
# ## Attribute to use for Surname
# ### Default: surname
# ### Example: urn:oid:2.5.4.4
# #SAML_ATTRIBUTE_SURNAME = 'urn:oid:2.5.4.4'
## Split into Given name and Surname
## Useful if your IDP only gives a display name
### Default: none
### Example: http://schemas.microsoft.com/identity/claims/displayname
#SAML_ATTRIBUTE_NAME = 'http://schemas.microsoft.com/identity/claims/displayname'
# ## Attribute to use for username
# ### Default: Use NameID instead
# ### Example: urn:oid:0.9.2342.19200300.100.1.1
# #SAML_ATTRIBUTE_USERNAME = 'urn:oid:0.9.2342.19200300.100.1.1'
## Attribute to use for username
### Default: Use NameID instead
### Example: urn:oid:0.9.2342.19200300.100.1.1
#SAML_ATTRIBUTE_USERNAME = 'urn:oid:0.9.2342.19200300.100.1.1'
# ## Attribute to get admin status from
# ### Default: Don't control admin with SAML attribute
# ### Example: https://example.edu/pdns-admin
# ### If set, look for the value 'true' to set a user as an administrator
# ### If not included in assertion, or set to something other than 'true',
# ### the user is set as a non-administrator user.
# #SAML_ATTRIBUTE_ADMIN = 'https://example.edu/pdns-admin'
## Attribute to get admin status from
### Default: Don't control admin with SAML attribute
### Example: https://example.edu/pdns-admin
### If set, look for the value 'true' to set a user as an administrator
### If not included in assertion, or set to something other than 'true',
### the user is set as a non-administrator user.
#SAML_ATTRIBUTE_ADMIN = 'https://example.edu/pdns-admin'
# ## Attribute to get account names from
# ### Default: Don't control accounts with SAML attribute
# ### If set, the user will be added and removed from accounts to match
# ### what's in the login assertion. Accounts that don't exist will
# ### be created and the user added to them.
# SAML_ATTRIBUTE_ACCOUNT = 'https://example.edu/pdns-account'
## Attribute to get group from
### Default: Don't use groups from SAML attribute
### Example: https://example.edu/pdns-admin-group
#SAML_ATTRIBUTE_GROUP = 'https://example.edu/pdns-admin'
## Group namem to get admin status from
### Default: Don't control admin with SAML group
### Example: https://example.edu/pdns-admin
#SAML_GROUP_ADMIN_NAME = 'powerdns-admin'
## Attribute to get group to account mappings from
### Default: None
### If set, the user will be added and removed from accounts to match
### what's in the login assertion if they are in the required group
#SAML_GROUP_TO_ACCOUNT_MAPPING = 'dev-admins=dev,prod-admins=prod'
## Attribute to get account names from
### Default: Don't control accounts with SAML attribute
### If set, the user will be added and removed from accounts to match
### what's in the login assertion. Accounts that don't exist will
### be created and the user added to them.
SAML_ATTRIBUTE_ACCOUNT = 'https://example.edu/pdns-account'
SAML_SP_ENTITY_ID = 'http://<SAML SP Entity ID>'
SAML_SP_CONTACT_NAME = '<contact name>'
SAML_SP_CONTACT_MAIL = '<contact mail>'
#Configures if SAML tokens should be encrypted.
#If enabled a new app certificate will be generated on restart
SAML_SIGN_REQUEST = False
# Configures if you want to request the IDP to sign the message
# Default is True
#SAML_WANT_MESSAGE_SIGNED = True
#Use SAML standard logout mechanism retrieved from idp metadata
#If configured false don't care about SAML session on logout.
#Logout from PowerDNS-Admin only and keep SAML session authenticated.
SAML_LOGOUT = False
#Configure to redirect to a different url then PowerDNS-Admin login after SAML logout
#for example redirect to google.com after successful saml logout
#SAML_LOGOUT_URL = 'https://google.com'
# SAML_SP_ENTITY_ID = 'http://<SAML SP Entity ID>'
# SAML_SP_CONTACT_NAME = '<contact name>'
# SAML_SP_CONTACT_MAIL = '<contact mail>'
# #Cofigures if SAML tokens should be encrypted.
# #If enabled a new app certificate will be generated on restart
# SAML_SIGN_REQUEST = False
# #Use SAML standard logout mechanism retreived from idp metadata
# #If configured false don't care about SAML session on logout.
# #Logout from PowerDNS-Admin only and keep SAML session authenticated.
# SAML_LOGOUT = False
# #Configure to redirect to a different url then PowerDNS-Admin login after SAML logout
# #for example redirect to google.com after successful saml logout
# #SAML_LOGOUT_URL = 'https://google.com'


@ -1,4 +1,4 @@
# defaults for Docker image
# Defaults for Docker image
BIND_ADDRESS='0.0.0.0'
PORT=80
@ -6,11 +6,8 @@ legal_envvars = (
'SECRET_KEY',
'BIND_ADDRESS',
'PORT',
'TIMEOUT',
'LOG_LEVEL',
'LOG_FILE',
'SALT',
'UPLOAD_DIR',
'SQLALCHEMY_TRACK_MODIFICATIONS',
'SQLALCHEMY_DATABASE_URI',
'SAML_ENABLED',
@ -42,12 +39,12 @@ legal_envvars = (
legal_envvars_int = (
'PORT',
'TIMEOUT',
'SAML_METADATA_CACHE_LIFETIME',
)
legal_envvars_bool = (
'SQLALCHEMY_TRACK_MODIFICATIONS',
'HSTS_ENABLED',
'SAML_ENABLED',
'SAML_DEBUG',
'SAML_SIGN_REQUEST',
@ -66,5 +63,3 @@ for v in legal_envvars:
if v in legal_envvars_int:
ret = int(ret)
sys.modules[__name__].__dict__[v] = ret
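
For context, the whitelist-and-cast pattern above boils down to something like the following minimal sketch (variable names are taken from the diff; the boolean parsing shown here is an assumption, since that part of the file is not visible in this hunk):
```
# Minimal sketch of the whitelisted environment loading used for the Docker config.
# The bool handling is an assumption; only the int casting is visible in the diff above.
import os
import sys

legal_envvars = ('SECRET_KEY', 'BIND_ADDRESS', 'PORT',
                 'SQLALCHEMY_TRACK_MODIFICATIONS', 'SQLALCHEMY_DATABASE_URI',
                 'SAML_ENABLED')
legal_envvars_int = ('PORT', 'SAML_METADATA_CACHE_LIFETIME')
legal_envvars_bool = ('SQLALCHEMY_TRACK_MODIFICATIONS', 'HSTS_ENABLED', 'SAML_ENABLED')

for v in legal_envvars:
    ret = os.environ.get(v)
    if ret is None:
        continue  # keep the default defined at module level
    if v in legal_envvars_int:
        ret = int(ret)
    elif v in legal_envvars_bool:
        ret = ret.lower() in ('1', 'true', 'yes')
    # expose the value as a module attribute so app.config.from_object() picks it up
    sys.modules[__name__].__dict__[v] = ret
```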


@ -1,131 +0,0 @@
import os
basedir = os.path.abspath(os.path.dirname(__file__))
# BASIC APP CONFIG
SECRET_KEY = 'changeme'
LOG_LEVEL = 'DEBUG'
LOG_FILE = os.path.join(basedir, 'logs/log.txt')
SALT = '$2b$12$yLUMTIfl21FKJQpTkRQXCu'
# TIMEOUT - for large zones
TIMEOUT = 10
# UPLOAD DIR
UPLOAD_DIR = os.path.join(basedir, 'upload')
TEST_USER_PASSWORD = 'test'
TEST_USER = 'test'
TEST_ADMIN_USER = 'admin'
TEST_ADMIN_PASSWORD = 'admin'
TEST_USER_APIKEY = 'wewdsfewrfsfsdf'
TEST_ADMIN_APIKEY = 'nghnbnhtghrtert'
# DATABASE CONFIG FOR MYSQL
# DB_HOST = os.environ.get('PDA_DB_HOST')
# DB_PORT = os.environ.get('PDA_DB_PORT', 3306 )
# DB_NAME = os.environ.get('PDA_DB_NAME')
# DB_USER = os.environ.get('PDA_DB_USER')
# DB_PASSWORD = os.environ.get('PDA_DB_PASSWORD')
# #MySQL
# SQLALCHEMY_DATABASE_URI = 'mysql://'+DB_USER+':'+DB_PASSWORD+'@'+DB_HOST+':'+ str(DB_PORT) + '/'+DB_NAME
# SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')
TEST_DB_LOCATION = '/tmp/testing.sqlite'
SQLALCHEMY_DATABASE_URI = 'sqlite:///{0}'.format(TEST_DB_LOCATION)
SQLALCHEMY_TRACK_MODIFICATIONS = False
# SAML Authentication
SAML_ENABLED = False
SAML_DEBUG = True
SAML_PATH = os.path.join(os.path.dirname(__file__), 'saml')
##Example for ADFS Metadata-URL
SAML_METADATA_URL = 'https://<hostname>/FederationMetadata/2007-06/FederationMetadata.xml'
#Cache Lifetime in Seconds
SAML_METADATA_CACHE_LIFETIME = 1
# SAML SSO binding format to use
## Default: library default (urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect)
#SAML_IDP_SSO_BINDING = 'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST'
## EntityID of the IdP to use. Only needed if more than one IdP is
## in the SAML_METADATA_URL
### Default: First (only) IdP in the SAML_METADATA_URL
### Example: https://idp.example.edu/idp
#SAML_IDP_ENTITY_ID = 'https://idp.example.edu/idp'
## NameID format to request
### Default: The SAML NameID Format in the metadata if present,
### otherwise urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified
### Example: urn:oid:0.9.2342.19200300.100.1.1
#SAML_NAMEID_FORMAT = 'urn:oid:0.9.2342.19200300.100.1.1'
## Attribute to use for Email address
### Default: email
### Example: urn:oid:0.9.2342.19200300.100.1.3
#SAML_ATTRIBUTE_EMAIL = 'urn:oid:0.9.2342.19200300.100.1.3'
## Attribute to use for Given name
### Default: givenname
### Example: urn:oid:2.5.4.42
#SAML_ATTRIBUTE_GIVENNAME = 'urn:oid:2.5.4.42'
## Attribute to use for Surname
### Default: surname
### Example: urn:oid:2.5.4.4
#SAML_ATTRIBUTE_SURNAME = 'urn:oid:2.5.4.4'
## Split into Given name and Surname
## Useful if your IDP only gives a display name
### Default: none
### Example: http://schemas.microsoft.com/identity/claims/displayname
#SAML_ATTRIBUTE_NAME = 'http://schemas.microsoft.com/identity/claims/displayname'
## Attribute to use for username
### Default: Use NameID instead
### Example: urn:oid:0.9.2342.19200300.100.1.1
#SAML_ATTRIBUTE_USERNAME = 'urn:oid:0.9.2342.19200300.100.1.1'
## Attribute to get admin status from
### Default: Don't control admin with SAML attribute
### Example: https://example.edu/pdns-admin
### If set, look for the value 'true' to set a user as an administrator
### If not included in assertion, or set to something other than 'true',
### the user is set as a non-administrator user.
#SAML_ATTRIBUTE_ADMIN = 'https://example.edu/pdns-admin'
## Attribute to get group from
### Default: Don't use groups from SAML attribute
### Example: https://example.edu/pdns-admin-group
#SAML_ATTRIBUTE_GROUP = 'https://example.edu/pdns-admin'
## Group namem to get admin status from
### Default: Don't control admin with SAML group
### Example: https://example.edu/pdns-admin
#SAML_GROUP_ADMIN_NAME = 'powerdns-admin'
## Attribute to get group to account mappings from
### Default: None
### If set, the user will be added and removed from accounts to match
### what's in the login assertion if they are in the required group
#SAML_GROUP_TO_ACCOUNT_MAPPING = 'dev-admins=dev,prod-admins=prod'
## Attribute to get account names from
### Default: Don't control accounts with SAML attribute
### If set, the user will be added and removed from accounts to match
### what's in the login assertion. Accounts that don't exist will
### be created and the user added to them.
SAML_ATTRIBUTE_ACCOUNT = 'https://example.edu/pdns-account'
SAML_SP_ENTITY_ID = 'http://<SAML SP Entity ID>'
SAML_SP_CONTACT_NAME = '<contact name>'
SAML_SP_CONTACT_MAIL = '<contact mail>'
#Configures if SAML tokens should be encrypted.
#If enabled a new app certificate will be generated on restart
SAML_SIGN_REQUEST = False
# Configures if you want to request the IDP to sign the message
# Default is True
#SAML_WANT_MESSAGE_SIGNED = True
#Use SAML standard logout mechanism retrieved from idp metadata
#If configured false don't care about SAML session on logout.
#Logout from PowerDNS-Admin only and keep SAML session authenticated.
SAML_LOGOUT = False
#Configure to redirect to a different url then PowerDNS-Admin login after SAML logout
#for example redirect to google.com after successful saml logout
#SAML_LOGOUT_URL = 'https://google.com'


@ -4,12 +4,12 @@ services:
powerdns-admin:
build:
context: .
dockerfile: docker/PowerDNS-Admin/Dockerfile.test
dockerfile: docker-test/Dockerfile
args:
- ENVIRONMENT=test
image: powerdns-admin-test
env_file:
- ./env-test
- ./docker-test/env
container_name: powerdns-admin-test
mem_limit: 256M
memswap_limit: 256M
@ -33,7 +33,7 @@ services:
pdns-server:
build:
context: .
dockerfile: docker/PowerDNS-Admin/Dockerfile.pdns.test
dockerfile: docker-test/Dockerfile.pdns
image: pdns-server-test
ports:
- "5053:53"
@ -41,10 +41,10 @@ services:
networks:
- default
env_file:
- ./env-test
- ./docker-test/env
networks:
default:
volumes:
powerdns-admin-assets3:
powerdns-admin-assets3:


@ -1,114 +1,20 @@
version: "2.1"
version: "3"
services:
powerdns-admin:
app:
build:
context: .
dockerfile: docker/PowerDNS-Admin/Dockerfile
args:
- ENVIRONMENT=${ENVIRONMENT}
image: powerdns-admin
container_name: powerdns-admin
mem_limit: 256M
memswap_limit: 256M
dockerfile: docker/Dockerfile
image: powerdns-admin:latest
container_name: powerdns_admin
ports:
- "9191:9191"
volumes:
# Code
- .:/powerdns-admin/
- "./configs/${ENVIRONMENT}.py:/powerdns-admin/config.py"
# Assets dir volume
- powerdns-admin-assets:/powerdns-admin/app/static
- powerdns-admin-assets2:/powerdns-admin/node_modules
- powerdns-admin-assets3:/powerdns-admin/logs
- ./app/static/custom:/powerdns-admin/app/static/custom
- "9191:80"
logging:
driver: json-file
options:
max-size: 50m
networks:
- default
environment:
- ENVIRONMENT=${ENVIRONMENT}
- PDA_DB_HOST=${PDA_DB_HOST}
- PDA_DB_NAME=${PDA_DB_NAME}
- PDA_DB_USER=${PDA_DB_USER}
- PDA_DB_PASSWORD=${PDA_DB_PASSWORD}
- PDA_DB_PORT=${PDA_DB_PORT}
- PDNS_HOST=${PDNS_HOST}
- PDNS_API_KEY=${PDNS_API_KEY}
- FLASK_APP=/powerdns-admin/app/__init__.py
depends_on:
powerdns-admin-mysql:
condition: service_healthy
powerdns-admin-mysql:
image: mysql/mysql-server:5.7
hostname: ${PDA_DB_HOST}
container_name: powerdns-admin-mysql
mem_limit: 256M
memswap_limit: 256M
expose:
- 3306
volumes:
- powerdns-admin-mysql-data:/var/lib/mysql
networks:
- default
environment:
- MYSQL_DATABASE=${PDA_DB_NAME}
- MYSQL_USER=${PDA_DB_USER}
- MYSQL_PASSWORD=${PDA_DB_PASSWORD}
healthcheck:
test: ["CMD", "mysqladmin" ,"ping", "-h", "localhost"]
timeout: 10s
retries: 5
pdns-server:
image: psitrax/powerdns
hostname: ${PDNS_HOST}
ports:
- "5053:53"
- "5053:53/udp"
networks:
- default
command: --api=yes --api-key=${PDNS_API_KEY} --webserver-address=0.0.0.0 --webserver-allow-from=0.0.0.0/0
environment:
- MYSQL_HOST=${PDNS_DB_HOST}
- MYSQL_USER=${PDNS_DB_USER}
- MYSQL_PASS=${PDNS_DB_PASSWORD}
- PDNS_API_KEY=${PDNS_API_KEY}
- PDNS_WEBSERVER_ALLOW_FROM=${PDNS_WEBSERVER_ALLOW_FROM}
depends_on:
pdns-mysql:
condition: service_healthy
pdns-mysql:
image: mysql/mysql-server:5.7
hostname: ${PDNS_DB_HOST}
container_name: ${PDNS_DB_HOST}
mem_limit: 256M
memswap_limit: 256M
expose:
- 3306
volumes:
- powerdns-mysql-data:/var/lib/mysql
networks:
- default
environment:
- MYSQL_DATABASE=${PDNS_DB_NAME}
- MYSQL_USER=${PDNS_DB_USER}
- MYSQL_PASSWORD=${PDNS_DB_PASSWORD}
healthcheck:
test: ["CMD", "mysqladmin" ,"ping", "-h", "localhost"]
timeout: 10s
retries: 5
networks:
default:
volumes:
powerdns-mysql-data:
powerdns-admin-mysql-data:
powerdns-admin-assets:
powerdns-admin-assets2:
powerdns-admin-assets3:
- SQLALCHEMY_DATABASE_URI=mysql://pda:changeme@host.docker.internal/pda
- GUINCORN_TIMEOUT=60
- GUNICORN_WORKERS=2
- GUNICORN_LOGLEVEL=DEBUG

docker-test/Dockerfile (new file, 33 lines)

@ -0,0 +1,33 @@
FROM debian:stretch-slim
LABEL maintainer="k@ndk.name"
ENV LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8 LANGUAGE=en_US.UTF-8
RUN apt-get update -y \
&& apt-get install -y --no-install-recommends apt-transport-https locales locales-all python3-pip python3-setuptools python3-dev curl libsasl2-dev libldap2-dev libssl-dev libxml2-dev libxslt1-dev libxmlsec1-dev libffi-dev build-essential libmariadb-dev-compat \
&& curl -sL https://deb.nodesource.com/setup_10.x | bash - \
&& curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - \
&& echo "deb https://dl.yarnpkg.com/debian/ stable main" > /etc/apt/sources.list.d/yarn.list \
&& apt-get update -y \
&& apt-get install -y nodejs yarn \
&& apt-get clean -y \
&& rm -rf /var/lib/apt/lists/*
# We copy just the requirements.txt first to leverage Docker cache
COPY ./requirements.txt /app/requirements.txt
WORKDIR /app
RUN pip3 install -r requirements.txt
COPY . /app
COPY ./docker/entrypoint.sh /usr/local/bin/
RUN chmod +x /usr/local/bin/entrypoint.sh
ENV FLASK_APP=powerdnsadmin/__init__.py
RUN yarn install --pure-lockfile --production \
&& yarn cache clean \
&& flask assets build
COPY ./docker-test/wait-for-pdns.sh /opt
RUN chmod u+x /opt/wait-for-pdns.sh
CMD ["/opt/wait-for-pdns.sh", "/usr/local/bin/pytest","--capture=no","-vv"]


@ -2,8 +2,8 @@ FROM ubuntu:latest
RUN apt-get update && apt-get install -y pdns-backend-sqlite3 pdns-server sqlite3
COPY ./docker/PowerDNS-Admin/pdns.sqlite.sql /data/pdns.sql
ADD ./docker/PowerDNS-Admin/start.sh /data/
COPY ./docker-test/pdns.sqlite.sql /data/pdns.sql
ADD ./docker-test/start.sh /data/
RUN rm -f /etc/powerdns/pdns.d/pdns.simplebind.conf
RUN rm -f /etc/powerdns/pdns.d/bind.conf


@ -7,4 +7,4 @@ PDNS_PROTO=http
PDNS_PORT=8081
PDNS_HOST=pdns-server
PDNS_API_KEY=changeme
PDNS_WEBSERVER_ALLOW_FROM=0.0.0.0/0
PDNS_WEBSERVER_ALLOW_FROM=0.0.0.0/0

docker/Dockerfile (new file, 33 lines)

@ -0,0 +1,33 @@
FROM debian:stretch-slim
LABEL maintainer="k@ndk.name"
ENV LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8 LANGUAGE=en_US.UTF-8
RUN apt-get update -y \
&& apt-get install -y --no-install-recommends apt-transport-https locales locales-all python3-pip python3-setuptools python3-dev curl libsasl2-dev libldap2-dev libssl-dev libxml2-dev libxslt1-dev libxmlsec1-dev libffi-dev build-essential libmariadb-dev-compat \
&& curl -sL https://deb.nodesource.com/setup_10.x | bash - \
&& curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - \
&& echo "deb https://dl.yarnpkg.com/debian/ stable main" > /etc/apt/sources.list.d/yarn.list \
&& apt-get update -y \
&& apt-get install -y nodejs yarn \
&& apt-get clean -y \
&& rm -rf /var/lib/apt/lists/*
# We copy just the requirements.txt first to leverage Docker cache
COPY ./requirements.txt /app/requirements.txt
WORKDIR /app
RUN pip3 install -r requirements.txt
COPY . /app
COPY ./docker/entrypoint.sh /usr/local/bin/
RUN chmod +x /usr/local/bin/entrypoint.sh
ENV FLASK_APP=powerdnsadmin/__init__.py
RUN yarn install --pure-lockfile --production \
&& yarn cache clean \
&& flask assets build
EXPOSE 80/tcp
ENTRYPOINT ["entrypoint.sh"]
CMD ["gunicorn","powerdnsadmin:create_app()"]


@ -1,48 +0,0 @@
FROM ubuntu:16.04
MAINTAINER Khanh Ngo "k@ndk.name"
ARG ENVIRONMENT=development
ENV ENVIRONMENT=${ENVIRONMENT}
WORKDIR /powerdns-admin
RUN apt-get update -y
RUN apt-get install -y apt-transport-https
RUN apt-get install -y locales locales-all
ENV LC_ALL en_US.UTF-8
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US.UTF-8
RUN apt-get install -y python3-pip python3-dev supervisor curl mysql-client
# Install node 10.x
RUN curl -sL https://deb.nodesource.com/setup_10.x | bash -
RUN apt-get install -y nodejs
RUN curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add -
RUN echo "deb https://dl.yarnpkg.com/debian/ stable main" > /etc/apt/sources.list.d/yarn.list
# Install yarn
RUN apt-get update -y
RUN apt-get install -y yarn
# Install Netcat for DB healthcheck
RUN apt-get install -y netcat
# lib for building mysql db driver
RUN apt-get install -y libmysqlclient-dev
# lib for building ldap and ssl-based application
RUN apt-get install -y libsasl2-dev libldap2-dev libssl-dev
# lib for building python3-saml
RUN apt-get install -y libxml2-dev libxslt1-dev libxmlsec1-dev libffi-dev pkg-config
COPY ./requirements.txt /powerdns-admin/requirements.txt
RUN pip3 install -r requirements.txt
ADD ./supervisord.conf /etc/supervisord.conf
ADD . /powerdns-admin/
COPY ./configs/${ENVIRONMENT}.py /powerdns-admin/config.py
COPY ./docker/PowerDNS-Admin/entrypoint.sh /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]


@ -1,46 +0,0 @@
FROM ubuntu:16.04
MAINTAINER Khanh Ngo "k@ndk.name"
ARG ENVIRONMENT=development
ENV ENVIRONMENT=${ENVIRONMENT}
WORKDIR /powerdns-admin
RUN apt-get update -y
RUN apt-get install -y apt-transport-https
RUN apt-get install -y locales locales-all
ENV LC_ALL en_US.UTF-8
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US.UTF-8
RUN apt-get install -y python3-pip python3-dev supervisor curl mysql-client
RUN curl -sL https://deb.nodesource.com/setup_10.x | bash -
RUN apt-get install -y nodejs
RUN curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add -
RUN echo "deb https://dl.yarnpkg.com/debian/ stable main" > /etc/apt/sources.list.d/yarn.list
# Install yarn
RUN apt-get update -y
RUN apt-get install -y yarn
# Install Netcat for DB healthcheck
RUN apt-get install -y netcat
# lib for building mysql db driver
RUN apt-get install -y libmysqlclient-dev
# lib for building ldap and ssl-based application
RUN apt-get install -y libsasl2-dev libldap2-dev libssl-dev
# lib for building python3-saml
RUN apt-get install -y libxml2-dev libxslt1-dev libxmlsec1-dev libffi-dev pkg-config
COPY ./requirements.txt /powerdns-admin/requirements.txt
COPY ./docker/PowerDNS-Admin/wait-for-pdns.sh /opt
RUN chmod u+x /opt/wait-for-pdns.sh
RUN pip3 install -r requirements.txt
CMD ["/opt/wait-for-pdns.sh", "/usr/local/bin/pytest","--capture=no","-vv"]


@ -1,71 +0,0 @@
#!/bin/bash
set -o errexit
set -o pipefail
# == Vars
#
DB_MIGRATION_DIR='/powerdns-admin/migrations'
if [[ -z ${PDNS_PROTO} ]];
then PDNS_PROTO="http"
fi
if [[ -z ${PDNS_PORT} ]];
then PDNS_PORT=8081
fi
# Wait for us to be able to connect to MySQL before proceeding
echo "===> Waiting for $PDA_DB_HOST MySQL service"
until nc -zv \
$PDA_DB_HOST \
$PDA_DB_PORT;
do
echo "MySQL ($PDA_DB_HOST) is unavailable - sleeping"
sleep 1
done
echo "===> DB management"
# Go in Workdir
cd /powerdns-admin
if [ ! -d "${DB_MIGRATION_DIR}" ]; then
echo "---> Running DB Init"
flask db init --directory ${DB_MIGRATION_DIR}
flask db migrate -m "Init DB" --directory ${DB_MIGRATION_DIR}
flask db upgrade --directory ${DB_MIGRATION_DIR}
./init_data.py
else
echo "---> Running DB Migration"
set +e
flask db migrate -m "Upgrade DB Schema" --directory ${DB_MIGRATION_DIR}
flask db upgrade --directory ${DB_MIGRATION_DIR}
set -e
fi
echo "===> Update PDNS API connection info"
# initial setting if not available in the DB
mysql -h${PDA_DB_HOST} -u${PDA_DB_USER} -p${PDA_DB_PASSWORD} -P${PDA_DB_PORT} ${PDA_DB_NAME} -e "INSERT INTO setting (name, value) SELECT * FROM (SELECT 'pdns_api_url', '${PDNS_PROTO}://${PDNS_HOST}:${PDNS_PORT}') AS tmp WHERE NOT EXISTS (SELECT name FROM setting WHERE name = 'pdns_api_url') LIMIT 1;"
mysql -h${PDA_DB_HOST} -u${PDA_DB_USER} -p${PDA_DB_PASSWORD} -P${PDA_DB_PORT} ${PDA_DB_NAME} -e "INSERT INTO setting (name, value) SELECT * FROM (SELECT 'pdns_api_key', '${PDNS_API_KEY}') AS tmp WHERE NOT EXISTS (SELECT name FROM setting WHERE name = 'pdns_api_key') LIMIT 1;"
# update pdns api setting if .env is changed.
mysql -h${PDA_DB_HOST} -u${PDA_DB_USER} -p${PDA_DB_PASSWORD} -P${PDA_DB_PORT} ${PDA_DB_NAME} -e "UPDATE setting SET value='${PDNS_PROTO}://${PDNS_HOST}:${PDNS_PORT}' WHERE name='pdns_api_url';"
mysql -h${PDA_DB_HOST} -u${PDA_DB_USER} -p${PDA_DB_PASSWORD} -P${PDA_DB_PORT} ${PDA_DB_NAME} -e "UPDATE setting SET value='${PDNS_API_KEY}' WHERE name='pdns_api_key';"
echo "===> Assets management"
echo "---> Running Yarn"
chown -R www-data:www-data /powerdns-admin/app/static
chown -R www-data:www-data /powerdns-admin/node_modules
su -s /bin/bash -c 'yarn install --pure-lockfile' www-data
echo "---> Running Flask assets"
chown -R www-data:www-data /powerdns-admin/logs
su -s /bin/bash -c 'flask assets build' www-data
echo "===> Start supervisor"
/usr/bin/supervisord -c /etc/supervisord.conf


@ -1,32 +0,0 @@
FROM debian:stretch-slim
LABEL maintainer="k@ndk.name"
ENV LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8 LANGUAGE=en_US.UTF-8
RUN apt-get update -y \
&& apt-get install -y --no-install-recommends apt-transport-https locales locales-all python3-pip python3-setuptools python3-dev curl libsasl2-dev libldap2-dev libssl-dev libxml2-dev libxslt1-dev libxmlsec1-dev libffi-dev build-essential libmariadb-dev-compat \
&& curl -sL https://deb.nodesource.com/setup_10.x | bash - \
&& curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - \
&& echo "deb https://dl.yarnpkg.com/debian/ stable main" > /etc/apt/sources.list.d/yarn.list \
&& apt-get update -y \
&& apt-get install -y nodejs yarn \
&& apt-get clean -y \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /opt/powerdns-admin
COPY . .
RUN pip3 install -r requirements.txt \
&& pip3 install psycopg2-binary \
&& yarn install --pure-lockfile \
&& cp config_template.py config.py \
&& flask assets build \
&& rm config.py
COPY ./docker/Production/entrypoint.sh /usr/local/bin/
RUN chmod +x /usr/local/bin/entrypoint.sh
ENV FLASK_APP=app/__init__.py
EXPOSE 80/tcp
ENTRYPOINT ["entrypoint.sh"]
CMD ["gunicorn","app:app"]


@ -1,15 +1,13 @@
#!/bin/bash
set -Eeuo pipefail
cd /opt/powerdns-admin
cd /app
GUNICORN_TIMEOUT="${GUINCORN_TIMEOUT:-120}"
GUNICORN_WORKERS="${GUNICORN_WORKERS:-4}"
GUNICORN_LOGLEVEL="${GUNICORN_LOGLEVEL:-info}"
BIND_ADDRESS="${BIND_ADDRESS:-0.0.0.0:80}"
if [ ! -f ./config.py ]; then
cat ./config_template.py ./docker/Production/config_docker.py > ./config.py
fi
cat ./powerdnsadmin/default_config.py ./configs/docker_config.py > ./powerdnsadmin/docker_config.py
GUNICORN_ARGS="-t ${GUNICORN_TIMEOUT} --workers ${GUNICORN_WORKERS} --bind ${BIND_ADDRESS} --log-level ${GUNICORN_LOGLEVEL}"
if [ "$1" == gunicorn ]; then

docs/API.md (new file, 105 lines)

@ -0,0 +1,105 @@
### API Usage
1. Run the Docker image with `docker-compose up`, then open the UI at http://localhost:9191. The Swagger API specification is available at http://localhost:9191/swagger.
2. Click register and create a user, e.g. username: admin and password: admin.
3. Log in to the UI and, in the settings, enable allow domain creation for users; now you can create and manage domains with the admin account and also with ordinary users.
4. Encode your username and password in base64. In our example the user is admin and the password is admin, so on a Linux command line we type:
```
$ echo -n 'admin:admin'|base64
YWRtaW46YWRtaW4=
```
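The same encoding can be produced with Python's standard library, if that is more convenient:
```
import base64
print(base64.b64encode(b'admin:admin').decode())  # YWRtaW46YWRtaW4=
```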
We use the generated output for basic authentication. Authenticated this way, we can create/delete/get zones and create/delete/get/update API keys.
Creating a domain:
```
curl -L -vvv -H 'Content-Type: application/json' -H 'Authorization: Basic YWRtaW46YWRtaW4=' -X POST http://localhost:9191/api/v1/pdnsadmin/zones --data '{"name": "yourdomain.com.", "kind": "NATIVE", "nameservers": ["ns1.mydomain.com."]}'
```
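If you prefer Python over curl, the same zone creation can be sketched with the requests library (assuming the requests package is installed):
```
import requests

r = requests.post(
    'http://localhost:9191/api/v1/pdnsadmin/zones',
    headers={'Authorization': 'Basic YWRtaW46YWRtaW4='},
    json={'name': 'yourdomain.com.', 'kind': 'NATIVE',
          'nameservers': ['ns1.mydomain.com.']},
)
print(r.status_code, r.text)
```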
Creating an API key with the Administrator role (an API key can also have the User role; when creating such a key, you must also specify the domains for which it is valid):
```
curl -L -vvv -H 'Content-Type: application/json' -H 'Authorization: Basic YWRtaW46YWRtaW4=' -X POST http://localhost:9191/api/v1/pdnsadmin/apikeys --data '{"description": "masterkey","domains":[], "role": "Administrator"}'
```
The call above will return a response like this:
```
[{"description": "samekey", "domains": [], "role": {"name": "Administrator", "id": 1}, "id": 2, "plain_key": "aGCthP3KLAeyjZI"}]
```
We take `plain_key` and base64-encode it; this is the only time the API key is available in plain text, so save it somewhere:
```
$ echo -n 'aGCthP3KLAeyjZI'|base64
YUdDdGhQM0tMQWV5alpJ
```
We can use the API key for all calls specified in our API specification (it tries to follow the PowerDNS API 1:1; only the tsigkeys endpoints are not yet implemented). Don't forget to specify Content-Type!
Getting the PowerDNS configuration:
```
curl -L -vvv -H 'Content-Type: application/json' -H 'X-API-KEY: YUdDdGhQM0tMQWV5alpJ' -X GET http://localhost:9191/api/v1/servers/localhost/config
```
creating and updating records:
```
curl -X PATCH -H 'Content-Type: application/json' --data '{"rrsets": [{"name": "test1.yourdomain.com.","type": "A","ttl": 86400,"changetype": "REPLACE","records": [ {"content": "192.0.2.5", "disabled": false} ]},{"name": "test2.yourdomain.com.","type": "AAAA","ttl": 86400,"changetype": "REPLACE","records": [ {"content": "2001:db8::6", "disabled": false} ]}]}' -H 'X-API-Key: YUdDdGhQM0tMQWV5alpJ' http://127.0.0.1:9191/api/v1/servers/localhost/zones/yourdomain.com.
```
getting domain:
```
curl -L -vvv -H 'Content-Type: application/json' -H 'X-API-KEY: YUdDdGhQM0tMQWV5alpJ' -X GET http://localhost:9191/api/v1/servers/localhost/zones/yourdomain.com
```
list zone records:
```
curl -H 'Content-Type: application/json' -H 'X-API-Key: YUdDdGhQM0tMQWV5alpJ' http://localhost:9191/api/v1/servers/localhost/zones/yourdomain.com
```
add new record:
```
curl -H 'Content-Type: application/json' -X PATCH --data '{"rrsets": [ {"name": "test.yourdomain.com.", "type": "A", "ttl": 86400, "changetype": "REPLACE", "records": [ {"content": "192.0.5.4", "disabled": false } ] } ] }' -H 'X-API-Key: YUdDdGhQM0tMQWV5alpJ' http://localhost:9191/api/v1/servers/localhost/zones/yourdomain.com | jq .
```
update record:
```
curl -H 'Content-Type: application/json' -X PATCH --data '{"rrsets": [ {"name": "test.yourdomain.com.", "type": "A", "ttl": 86400, "changetype": "REPLACE", "records": [ {"content": "192.0.2.5", "disabled": false, "name": "test.yourdomain.com.", "ttl": 86400, "type": "A"}]}]}' -H 'X-API-Key: YUdDdGhQM0tMQWV5alpJ' http://localhost:9191/api/v1/servers/localhost/zones/yourdomain.com | jq .
```
delete record:
```
curl -H 'Content-Type: application/json' -X PATCH --data '{"rrsets": [ {"name": "test.yourdomain.com.", "type": "A", "ttl": 86400, "changetype": "DELETE"}]}' -H 'X-API-Key: YUdDdGhQM0tMQWV5alpJ' http://localhost:9191/api/v1/servers/localhost/zones/yourdomain.com | jq
```
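Any of the rrset PATCH calls above can be issued from Python in the same way; a sketch for the REPLACE case, reusing values from the examples (the API key value is illustrative):
```
import requests

rrset = {'name': 'test.yourdomain.com.', 'type': 'A', 'ttl': 86400,
         'changetype': 'REPLACE',
         'records': [{'content': '192.0.2.5', 'disabled': False}]}
r = requests.patch(
    'http://localhost:9191/api/v1/servers/localhost/zones/yourdomain.com',
    headers={'X-API-Key': 'YUdDdGhQM0tMQWV5alpJ'},
    json={'rrsets': [rrset]},
)
print(r.status_code, r.text)
```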
### Generate ER diagram
```
apt-get install python-dev graphviz libgraphviz-dev pkg-config
```
```
pip install graphviz mysqlclient ERAlchemy
```
```
docker-compose up -d
```
```
source .env
```
```
eralchemy -i 'mysql://${PDA_DB_USER}:${PDA_DB_PASSWORD}@'$(docker inspect powerdns-admin-mysql|jq -jr '.[0].NetworkSettings.Networks.powerdnsadmin_default.IPAddress')':3306/powerdns_admin' -o /tmp/output.pdf
```


@ -18,7 +18,8 @@ depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'avatar')
with op.batch_alter_table('user') as batch_op:
batch_op.drop_column('avatar')
# ### end Alembic commands ###
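
The switch to `batch_alter_table` matters mainly for SQLite, which cannot drop a column with a plain ALTER TABLE; batch mode recreates the table instead. A minimal sketch of the pattern (the downgrade and the column type shown here are illustrative assumptions, not part of this commit):
```
import sqlalchemy as sa
from alembic import op


def upgrade():
    # batch mode works on SQLite as well as MySQL/PostgreSQL
    with op.batch_alter_table('user') as batch_op:
        batch_op.drop_column('avatar')


def downgrade():
    # illustrative reverse operation; the original column type is assumed
    with op.batch_alter_table('user') as batch_op:
        batch_op.add_column(sa.Column('avatar', sa.String(length=128), nullable=True))
```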


@ -2,58 +2,9 @@ import os
from werkzeug.contrib.fixers import ProxyFix
from flask import Flask
from flask_seasurf import SeaSurf
from flask_sslify import SSLify
from .lib import utils
# from flask_login import LoginManager
# from flask_sqlalchemy import SQLAlchemy as SA
# from flask_migrate import Migrate
# from authlib.flask.client import OAuth as AuthlibOAuth
# from sqlalchemy.exc import OperationalError
# from app.assets import assets
# ### SYBPATCH ###
# from app.customboxes import customBoxes
### SYBPATCH ###
# subclass SQLAlchemy to enable pool_pre_ping
# class SQLAlchemy(SA):
# def apply_pool_defaults(self, app, options):
# SA.apply_pool_defaults(self, app, options)
# options["pool_pre_ping"] = True
# app = Flask(__name__)
# app.config.from_object('config')
# app.wsgi_app = ProxyFix(app.wsgi_app)
# csrf = SeaSurf(app)
# assets.init_app(app)
# #### CONFIGURE LOGGER ####
# from app.lib.log import logger
# logging = logger('powerdns-admin', app.config['LOG_LEVEL'], app.config['LOG_FILE']).config()
# login_manager = LoginManager()
# login_manager.init_app(app)
# db = SQLAlchemy(app) # database
# migrate = Migrate(app, db) # flask-migrate
# authlib_oauth_client = AuthlibOAuth(app) # authlib oauth
# if app.config.get('SAML_ENABLED') and app.config.get('SAML_ENCRYPT'):
# from app.lib import certutil
# if not certutil.check_certificate():
# certutil.create_self_signed_cert()
# from app import models
# from app.blueprints.api import api_blueprint
# app.register_blueprint(api_blueprint, url_prefix='/api/v1')
# from app import views
def create_app(config=None):
from . import models, routes, services
@ -63,9 +14,6 @@ def create_app(config=None):
# Proxy
app.wsgi_app = ProxyFix(app.wsgi_app)
# HSTS enabled
_sslify = SSLify(app)
# CSRF protection
csrf = SeaSurf(app)
csrf.exempt(routes.index.dyndns_checkip)
@ -80,10 +28,14 @@ def create_app(config=None):
csrf.exempt(routes.api.api_zone_forward)
csrf.exempt(routes.api.api_create_zone)
# Load default configuration
app.config.from_object('powerdnsadmin.default_config')
# Load config from env variables if using docker
if os.path.exists(os.path.join(app.root_path, 'docker_config.py')):
app.config.from_object('powerdnsadmin.docker_config')
else:
# Load default configuration
app.config.from_object('powerdnsadmin.default_config')
# Load environment configuration
# Load config file from FLASK_CONF env variable
if 'FLASK_CONF' in os.environ:
app.config.from_envvar('FLASK_CONF')
@ -94,6 +46,11 @@ def create_app(config=None):
elif config.endswith('.py'):
app.config.from_pyfile(config)
# HSTS
if app.config.get('HSTS_ENABLED'):
from flask_sslify import SSLify
_sslify = SSLify(app)
# Load app's components
assets.init_app(app)
models.init_app(app)
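
In other words, configuration is now layered: `powerdnsadmin.docker_config` (generated by the Docker entrypoint) or `powerdnsadmin.default_config` is loaded first, then whatever `FLASK_CONF` points to, then an explicit argument. A rough usage sketch (paths are illustrative):
```
import os
from powerdnsadmin import create_app

# rely on the defaults (docker_config.py wins if the entrypoint generated it)
app = create_app()

# or point FLASK_CONF at a settings file before creating the app
os.environ['FLASK_CONF'] = '/opt/powerdns-admin/configs/production.py'
app = create_app()

# or pass a .py file explicitly; it is loaded last and overrides the layers above
app = create_app(config='/opt/powerdns-admin/configs/production.py')
```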


@ -1,102 +1,25 @@
import os
basedir = os.path.abspath(os.path.dirname(__file__))
basedir = os.path.abspath(os.path.abspath(os.path.dirname(__file__)))
### BASIC APP CONFIG
SALT = '$2b$12$yLUMTIfl21FKJQpTkRQXCu'
# BASIC APP CONFIG
SECRET_KEY = 'We are the world'
SECRET_KEY = 'e951e5a1f4b94151b360f47edf596dd2'
BIND_ADDRESS = '0.0.0.0'
PORT = 9191
HSTS_ENABLED = False
# TIMEOUT - for large zones
TIMEOUT = 10
# LOG CONFIG
# - For docker, LOG_FILE=''
LOG_LEVEL = 'DEBUG'
# DATABASE CONFIG
### DATABASE CONFIG
SQLA_DB_USER = 'pda'
SQLA_DB_PASSWORD = 'changeme'
SQLA_DB_HOST = '127.0.0.1'
SQLA_DB_NAME = 'pda'
SQLALCHEMY_TRACK_MODIFICATIONS = True
# DATBASE - MySQL
### DATBASE - MySQL
SQLALCHEMY_DATABASE_URI = 'mysql://'+SQLA_DB_USER+':'+SQLA_DB_PASSWORD+'@'+SQLA_DB_HOST+'/'+SQLA_DB_NAME
# DATABSE - SQLite
#SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'pdns.db')
### DATABSE - SQLite
# SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'pdns.db')
# SAML Authnetication
SAML_ENABLED = False
# SAML_DEBUG = True
# SAML_PATH = os.path.join(os.path.dirname(__file__), 'saml')
# ##Example for ADFS Metadata-URL
# SAML_METADATA_URL = 'https://<hostname>/FederationMetadata/2007-06/FederationMetadata.xml'
# #Cache Lifetime in Seconds
# SAML_METADATA_CACHE_LIFETIME = 1
# # SAML SSO binding format to use
# ## Default: library default (urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect)
# #SAML_IDP_SSO_BINDING = 'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST'
# ## EntityID of the IdP to use. Only needed if more than one IdP is
# ## in the SAML_METADATA_URL
# ### Default: First (only) IdP in the SAML_METADATA_URL
# ### Example: https://idp.example.edu/idp
# #SAML_IDP_ENTITY_ID = 'https://idp.example.edu/idp'
# ## NameID format to request
# ### Default: The SAML NameID Format in the metadata if present,
# ### otherwise urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified
# ### Example: urn:oid:0.9.2342.19200300.100.1.1
# #SAML_NAMEID_FORMAT = 'urn:oid:0.9.2342.19200300.100.1.1'
# ## Attribute to use for Email address
# ### Default: email
# ### Example: urn:oid:0.9.2342.19200300.100.1.3
# #SAML_ATTRIBUTE_EMAIL = 'urn:oid:0.9.2342.19200300.100.1.3'
# ## Attribute to use for Given name
# ### Default: givenname
# ### Example: urn:oid:2.5.4.42
# #SAML_ATTRIBUTE_GIVENNAME = 'urn:oid:2.5.4.42'
# ## Attribute to use for Surname
# ### Default: surname
# ### Example: urn:oid:2.5.4.4
# #SAML_ATTRIBUTE_SURNAME = 'urn:oid:2.5.4.4'
# ## Attribute to use for username
# ### Default: Use NameID instead
# ### Example: urn:oid:0.9.2342.19200300.100.1.1
# #SAML_ATTRIBUTE_USERNAME = 'urn:oid:0.9.2342.19200300.100.1.1'
# ## Attribute to get admin status from
# ### Default: Don't control admin with SAML attribute
# ### Example: https://example.edu/pdns-admin
# ### If set, look for the value 'true' to set a user as an administrator
# ### If not included in assertion, or set to something other than 'true',
# ### the user is set as a non-administrator user.
# #SAML_ATTRIBUTE_ADMIN = 'https://example.edu/pdns-admin'
# ## Attribute to get account names from
# ### Default: Don't control accounts with SAML attribute
# ### If set, the user will be added and removed from accounts to match
# ### what's in the login assertion. Accounts that don't exist will
# ### be created and the user added to them.
# SAML_ATTRIBUTE_ACCOUNT = 'https://example.edu/pdns-account'
# SAML_SP_ENTITY_ID = 'http://<SAML SP Entity ID>'
# SAML_SP_CONTACT_NAME = '<contact name>'
# SAML_SP_CONTACT_MAIL = '<contact mail>'
# #Cofigures if SAML tokens should be encrypted.
# #If enabled a new app certificate will be generated on restart
# SAML_SIGN_REQUEST = False
# #Use SAML standard logout mechanism retreived from idp metadata
# #If configured false don't care about SAML session on logout.
# #Logout from PowerDNS-Admin only and keep SAML session authenticated.
# SAML_LOGOUT = False
# #Configure to redirect to a different url then PowerDNS-Admin login after SAML logout
# #for example redirect to google.com after successful saml logout
# #SAML_LOGOUT_URL = 'https://google.com'


@ -12,44 +12,6 @@ from datetime import datetime, timedelta
from threading import Thread
from .certutil import KEY_FILE, CERT_FILE
# import logging as logger
# logging = logger.getLogger(__name__)
# if app.config['SAML_ENABLED']:
# from onelogin.saml2.auth import OneLogin_Saml2_Auth
# from onelogin.saml2.idp_metadata_parser import OneLogin_Saml2_IdPMetadataParser
# idp_timestamp = datetime(1970, 1, 1)
# idp_data = None
# if 'SAML_IDP_ENTITY_ID' in app.config:
# idp_data = OneLogin_Saml2_IdPMetadataParser.parse_remote(app.config['SAML_METADATA_URL'], entity_id=app.config.get('SAML_IDP_ENTITY_ID', None), required_sso_binding=app.config['SAML_IDP_SSO_BINDING'])
# else:
# idp_data = OneLogin_Saml2_IdPMetadataParser.parse_remote(app.config['SAML_METADATA_URL'], entity_id=app.config.get('SAML_IDP_ENTITY_ID', None))
# if idp_data is None:
# print('SAML: IDP Metadata initial load failed')
# exit(-1)
# idp_timestamp = datetime.now()
# def get_idp_data():
# global idp_data, idp_timestamp
# lifetime = timedelta(minutes=app.config['SAML_METADATA_CACHE_LIFETIME'])
# if idp_timestamp+lifetime < datetime.now():
# background_thread = Thread(target=retrieve_idp_data)
# background_thread.start()
# return idp_data
# def retrieve_idp_data():
# global idp_data, idp_timestamp
# if 'SAML_IDP_SSO_BINDING' in app.config:
# new_idp_data = OneLogin_Saml2_IdPMetadataParser.parse_remote(app.config['SAML_METADATA_URL'], entity_id=app.config.get('SAML_IDP_ENTITY_ID', None), required_sso_binding=app.config['SAML_IDP_SSO_BINDING'])
# else:
# new_idp_data = OneLogin_Saml2_IdPMetadataParser.parse_remote(app.config['SAML_METADATA_URL'], entity_id=app.config.get('SAML_IDP_ENTITY_ID', None))
# if new_idp_data is not None:
# idp_data = new_idp_data
# idp_timestamp = datetime.now()
# print("SAML: IDP Metadata successfully retrieved from: " + app.config['SAML_METADATA_URL'])
# else:
# print("SAML: IDP Metadata could not be retrieved")
def auth_from_url(url):
@ -115,6 +77,8 @@ def fetch_json(remote_url, method='GET', data=None, params=None, headers=None, t
if r.status_code == 204:
return {}
elif r.status_code == 409:
return {'error': 'Resource already exists or conflict', 'http_code': r.status_code}
try:
assert ('json' in r.headers['content-type'])
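
With this change `fetch_json()` no longer treats an HTTP 409 as a hard failure; it returns a small dict the caller can inspect (see `Domain.add` further down). A sketch of the consuming pattern, with argument names taken from the signature in the hunk header; the module path, URL, API key, and payload handling are illustrative assumptions:
```
from powerdnsadmin.lib import utils

# illustrative call against the PowerDNS API; values are placeholders
jdata = utils.fetch_json('http://pdns-server:8081/api/v1/servers/localhost/zones',
                         method='POST',
                         headers={'X-API-Key': 'changeme'},
                         data={'name': 'yourdomain.com.', 'kind': 'NATIVE'})
if 'error' in jdata:
    if jdata.get('http_code') == 409:
        print('Resource already exists on the PowerDNS side')
    else:
        print('API error:', jdata['error'])
```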


@ -24,7 +24,7 @@ bravado_config = {
'use_models': True,
}
dir_path = os.path.dirname(os.path.abspath(__file__))
dir_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
spec_path = os.path.join(dir_path, "swagger-spec.yaml")
spec_dict = get_swagger_spec(spec_path)
spec = Spec.from_dict(spec_dict, config=bravado_config)
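
The extra `os.path.dirname()` call resolves `swagger-spec.yaml` one directory above the module instead of next to it. A quick illustration with a hypothetical path:
```
import os

# hypothetical location: __file__ == '/opt/powerdns-admin/tests/test_api.py'
old_dir = os.path.dirname(os.path.abspath(__file__))                    # .../tests
new_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))   # .../powerdns-admin
spec_path = os.path.join(new_dir, "swagger-spec.yaml")                  # spec now expected one level up
```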


@ -1,26 +0,0 @@
import sys
import os
import re
import ldap
import ldap.filter
import base64
import bcrypt
import itertools
import traceback
import pyotp
import dns.reversename
import dns.inet
import dns.name
import pytimeparse
import random
import string
from ast import literal_eval
from datetime import datetime
from urllib.parse import urljoin
from distutils.util import strtobool
from distutils.version import StrictVersion
from flask_login import AnonymousUserMixin
from app import db, app
from app.lib import utils
from app.lib.log import logging


@ -243,6 +243,8 @@ class Domain(db.Model):
data=post_data)
if 'error' in jdata.keys():
current_app.logger.error(jdata['error'])
if jdata.get('http_code') == 409:
return {'status': 'error', 'msg': 'Domain already exists'}
return {'status': 'error', 'msg': jdata['error']}
else:
current_app.logger.info(


@ -281,7 +281,7 @@ def api_get_apikeys(domain_name):
if current_user.role.name not in ['Administrator', 'Operator']:
if domain_name:
msg = "Check if domain {0} exists and \
is allowed for user." .format(domain_name)
is allowed for user." .format(domain_name)
current_app.logger.debug(msg)
apikeys = current_user.get_apikeys(domain_name)
@ -502,14 +502,18 @@ def api_create_zone(server_id):
@api_bp.route('/servers/<string:server_id>/zones', methods=['GET'])
@apikey_auth
def api_get_zones(server_id):
if g.apikey.role.name not in ['Administrator', 'Operator']:
domain_obj_list = g.apikey.domains
if server_id == 'powerdns-admin':
if g.apikey.role.name not in ['Administrator', 'Operator']:
domain_obj_list = g.apikey.domains
else:
domain_obj_list = Domain.query.all()
return json.dumps(domain_schema.dump(domain_obj_list)), 200
else:
domain_obj_list = Domain.query.all()
return json.dumps(domain_schema.dump(domain_obj_list)), 200
resp = helper.forward_request()
return resp.content, resp.status_code, resp.headers.items()
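
With this change, a request against the pseudo-server `powerdns-admin` is answered from PowerDNS-Admin's own database (scoped by the API key's role), while any other `server_id` is still forwarded to the PowerDNS API. A sketch of the two calls (API key value reused from the docs above):
```
import requests

headers = {'X-API-Key': 'YUdDdGhQM0tMQWV5alpJ'}

# answered from the PowerDNS-Admin database, filtered by the key's role
local = requests.get('http://localhost:9191/api/v1/servers/powerdns-admin/zones',
                     headers=headers)

# forwarded to the PowerDNS backend API
proxied = requests.get('http://localhost:9191/api/v1/servers/localhost/zones',
                       headers=headers)
print(local.status_code, proxied.status_code)
```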
#endpoint to snychronize Domains in background
# The endpoint to snychronize Domains in background
@api_bp.route('/sync_domains', methods=['GET'])
@apikey_auth
def sync_domains():


@ -1,4 +1,4 @@
from flask import Blueprint, render_template, make_response, url_for, current_app, request, jsonify
from flask import Blueprint, render_template, make_response, url_for, current_app, request, jsonify, redirect
from flask_login import login_required, current_user
from sqlalchemy import not_, or_
@ -120,7 +120,7 @@ def domains_custom(boxId):
def dashboard():
if not Setting().get('pdns_api_url') or not Setting().get(
'pdns_api_key') or not Setting().get('pdns_version'):
return redirect(url_for('admin_setting_pdns'))
return redirect(url_for('admin.setting_pdns'))
BG_DOMAIN_UPDATE = Setting().get('bg_domain_updates')
if not BG_DOMAIN_UPDATE:


@ -111,7 +111,7 @@ def add():
if ' ' in domain_name or not domain_name or not domain_type:
return render_template('errors/400.html',
msg="Please correct your input"), 400
msg="Please enter a valid domain name"), 400
if domain_type == 'slave':
if request.form.getlist('domain_master_address'):


@ -25,8 +25,12 @@
<i class="fa fa-warning text-yellow"></i> Oops! Bad request
</h3>
<p>
The server refused to process your request and return a 400 error.
You may <a href="{{ url_for('dashboard.dashboard') }}">return to the dashboard</a>.
{% if msg %}
{{ msg }}
{% else %}
The server refused to process your request and return a 400 error.
{% endif %}
<br/>You may <a href="{{ url_for('dashboard.dashboard') }}">return to the dashboard</a>.
</p>
</div>
<!-- /.error-content -->


@ -10,7 +10,7 @@
##############################################################
### Imports
from app.models import Domain
from powerdnsadmin.models.domain import Domain
from config import BG_DOMAIN_UPDATES
import sys


@ -1,4 +0,0 @@
# Ignore everything in this directory
*
# Except this file
!.gitignore