195 rel cleanup (#268)

Fixes #195 pre release cleanup. This does a LOT, clippy, formatting, and much much more. It fixes a lot of parts of the book, improves server config and more.
This commit is contained in:
Firstyear 2020-06-18 10:30:42 +10:00 committed by GitHub
parent d47d4fed0a
commit fec28e03e2
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
84 changed files with 2345 additions and 2081 deletions

View file

@ -5,3 +5,4 @@
Makefile
target
test.db
kanidmd/sampledata

View file

@ -1 +1,8 @@
## Author
William Brown: william@blackhats.net.au
## Contributors
Jake
Charcol
Pando85

74
Cargo.lock generated
View file

@ -818,12 +818,13 @@ dependencies = [
[[package]]
name = "crossbeam-queue"
version = "0.2.2"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ab6bffe714b6bb07e42f201352c34f51fefd355ace793f9e638ebd52d23f98d2"
checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570"
dependencies = [
"cfg-if",
"crossbeam-utils",
"maybe-uninit",
]
[[package]]
@ -1287,9 +1288,9 @@ dependencies = [
[[package]]
name = "hermit-abi"
version = "0.1.13"
version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "91780f809e750b0a89f5544be56617ff6b1227ee485bcb06ebe10cdf89bd3b71"
checksum = "b9586eedd4ce6b3c498bc3b4dd92fc9f11166aa908a914071953768066c67909"
dependencies = [
"libc",
]
@ -1386,9 +1387,9 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
[[package]]
name = "idlset"
version = "0.1.8"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a98b4868ca8a8a8bcf3943fe1cfd8b27c17d398d00c0b8aa78aee4c2b5131eb"
checksum = "74315e000e3c87cf70bcf127e1ac9836a2695a447a18c9feb226bb27321a7105"
dependencies = [
"serde",
"serde_derive",
@ -1524,6 +1525,7 @@ dependencies = [
"tokio",
"tokio-openssl",
"tokio-util 0.2.0",
"toml",
"uuid",
"zxcvbn",
]
@ -1760,9 +1762,9 @@ dependencies = [
[[package]]
name = "miniz_oxide"
version = "0.3.6"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aa679ff6578b1cddee93d7e82e263b94a575e0bfced07284eb0c037c1d2416a5"
checksum = "791daaae1ed6889560f8c4359194f56648355540573244a5448a83ba1ecc7435"
dependencies = [
"adler32",
]
@ -1930,9 +1932,9 @@ dependencies = [
[[package]]
name = "num-integer"
version = "0.1.42"
version = "0.1.43"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f6ea62e9d81a77cd3ee9a2a5b9b609447857f3d358704331e4ef39eb247fcba"
checksum = "8d59457e662d541ba17869cf51cf177c0b5f0cbf476c66bdc90bf1edac4f875b"
dependencies = [
"autocfg",
"num-traits",
@ -1940,9 +1942,9 @@ dependencies = [
[[package]]
name = "num-iter"
version = "0.1.40"
version = "0.1.41"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dfb0800a0291891dd9f4fe7bd9c19384f98f7fbe0cd0f39a2c6b88b9868bbc00"
checksum = "7a6e6b7c748f995c4c29c5f5ae0248536e04a5739927c74ec0fa564805094b9f"
dependencies = [
"autocfg",
"num-integer",
@ -1963,9 +1965,9 @@ dependencies = [
[[package]]
name = "num-traits"
version = "0.2.11"
version = "0.2.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c62be47e61d1842b9170f0fdeec8eba98e60e90e5446449a0545e5152acd7096"
checksum = "ac267bcc07f48ee5f8935ab0d24f316fb722d7a1292e2913f0cc196b29ffd611"
dependencies = [
"autocfg",
]
@ -1994,9 +1996,9 @@ checksum = "0b631f7e854af39a1739f401cf34a8a013dfe09eac4fa4dba91e9768bd28168d"
[[package]]
name = "oorandom"
version = "11.1.1"
version = "11.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94af325bc33c7f60191be4e2c984d48aaa21e2854f473b85398344b60c9b6358"
checksum = "a170cebd8021a008ea92e4db85a72f80b35df514ec664b296fdcbb654eac0b2c"
[[package]]
name = "openssl"
@ -2020,9 +2022,9 @@ checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de"
[[package]]
name = "openssl-src"
version = "111.9.0+1.1.1g"
version = "111.10.0+1.1.1g"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a2dbe10ddd1eb335aba3780eb2eaa13e1b7b441d2562fd962398740927f39ec4"
checksum = "47cd4a96d49c3abf4cac8e8a80cba998a030c75608f158fb1c5f609772f265e6"
dependencies = [
"cc",
]
@ -2111,18 +2113,18 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e"
[[package]]
name = "pin-project"
version = "0.4.20"
version = "0.4.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e75373ff9037d112bb19bc61333a06a159eaeb217660dcfbea7d88e1db823919"
checksum = "12e3a6cdbfe94a5e4572812a0201f8c0ed98c1c452c7b8563ce2276988ef9c17"
dependencies = [
"pin-project-internal",
]
[[package]]
name = "pin-project-internal"
version = "0.4.20"
version = "0.4.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "10b4b44893d3c370407a1d6a5cfde7c41ae0478e31c516c85f67eb3adc51be6d"
checksum = "6a0ffd45cf79d88737d7cc85bfd5d2894bee1139b356e616fe85dc389c61aaf7"
dependencies = [
"proc-macro2",
"quote",
@ -2199,9 +2201,9 @@ checksum = "7e0456befd48169b9f13ef0f0ad46d492cf9d2dbb918bcf38e01eed4ce3ec5e4"
[[package]]
name = "proc-macro-nested"
version = "0.1.5"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0afe1bd463b9e9ed51d0e0f0b50b6b146aec855c56fd182bb242388710a9b6de"
checksum = "eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a"
[[package]]
name = "proc-macro2"
@ -2372,9 +2374,9 @@ checksum = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8"
[[package]]
name = "remove_dir_all"
version = "0.5.2"
version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e"
checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
dependencies = [
"winapi 0.3.8",
]
@ -2580,9 +2582,9 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
[[package]]
name = "serde"
version = "1.0.111"
version = "1.0.112"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c9124df5b40cbd380080b2cc6ab894c040a3070d995f5c9dc77e18c34a8ae37d"
checksum = "736aac72d1eafe8e5962d1d1c3d99b0df526015ba40915cb3c49d042e92ec243"
dependencies = [
"serde_derive",
]
@ -2599,9 +2601,9 @@ dependencies = [
[[package]]
name = "serde_derive"
version = "1.0.111"
version = "1.0.112"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f2c3ac8e6ca1e9c80b8be1023940162bf81ae3cffbb1809474152f2ce1eb250"
checksum = "bf0343ce212ac0d3d6afd9391ac8e9c9efe06b533c8d33f660f6390cc4093f57"
dependencies = [
"proc-macro2",
"quote",
@ -2610,9 +2612,9 @@ dependencies = [
[[package]]
name = "serde_json"
version = "1.0.54"
version = "1.0.55"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cfe4c1f6427dbc29329c6288e9e748b8b8e0ea42a0aab733e887fa72c22e965d"
checksum = "ec2c5d7e739bc07a3e73381a39d61fdb5f671c60c1df26a130690665803d8226"
dependencies = [
"itoa",
"ryu",
@ -2803,9 +2805,9 @@ dependencies = [
[[package]]
name = "syn"
version = "1.0.30"
version = "1.0.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93a56fabc59dce20fe48b6c832cc249c713e7ed88fa28b0ee0a3bfcaae5fe4e2"
checksum = "b5304cfdf27365b7585c25d4af91b35016ed21ef88f17ced89c7093b43dba8b6"
dependencies = [
"proc-macro2",
"quote",
@ -3228,9 +3230,9 @@ dependencies = [
[[package]]
name = "vcpkg"
version = "0.2.9"
version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55d1e41d56121e07f1e223db0a4def204e45c85425f6a16d462fd07c8d10d74c"
checksum = "6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c"
[[package]]
name = "version_check"

View file

@ -11,6 +11,7 @@
- [RADIUS](./radius.md)
- [Password Quality and Badlisting](./password_quality.md)
- [Recycle Bin](./recycle_bin.md)
- [Legacy Applications -- LDAP](./ldap.md)
-----------
[Why TLS?](./why_tls.md)

View file

@ -102,7 +102,7 @@ Further, groups that are considered "idm_high_privilege" can NOT be managed
by the standard "idm_group_manage_priv" group.
Management of high privilege accounts and groups is granted through the
the "hp" variants of all privileges. For example:
the "hp" variants of all privileges. A non-conclusive list:
* idm_hp_account_read_priv
* idm_hp_account_manage_priv

View file

@ -16,8 +16,8 @@ To take the backup (assuming our docker environment) you first need to stop the
docker stop <container name>
docker run --rm -i -t -v kanidmd:/data -v kanidmd_backups:/backup \
kanidm/server:latest /sbin/kanidmd backup \
/backup/kanidm.backup.json -D /data/kanidm.db
kanidm/server:latest /sbin/kanidmd backup -c /data/server.toml \
/backup/kanidm.backup.json
docker start <container name>
You can then restart your instance. DO NOT modify the backup.json as it may introduce
@ -27,8 +27,8 @@ To restore from the backup:
docker stop <container name>
docker run --rm -i -t -v kanidmd:/data -v kanidmd_backups:/backup \
kanidm/server:latest /sbin/kanidmd restore \
/backup/kanidm.backup.json -D /data/kanidm.db
kanidm/server:latest /sbin/kanidmd restore -c /data/server.toml \
/backup/kanidm.backup.json
docker start <container name>
That's it!
@ -62,8 +62,8 @@ you can then rename the domain with the commands as follows:
docker stop <container name>
docker run --rm -i -t -v kanidmd:/data \
kanidm/server:latest /sbin/kanidmd domain_name_change \
-D /data/kanidm.db -n idm.new.domain.name
kanidm/server:latest /sbin/kanidmd domain_name_change -c /data/server.toml \
-n idm.new.domain.name
docker start <container name>
@ -89,8 +89,7 @@ definitions (this works even though the schema is in the same database!)
docker stop <container name>
docker run --rm -i -t -v kanidmd:/data \
kanidm/server:latest /sbin/kanidmd reindex \
-D /data/kanidm.db
kanidm/server:latest /sbin/kanidmd reindex -c /data/server.toml
docker start <container name>
Generally, reindexing is a rare action and should not normally be required.
@ -108,8 +107,7 @@ You can run a verification with:
docker stop <container name>
docker run --rm -i -t -v kanidmd:/data \
kanidm/server:latest /sbin/kanidmd verify \
-D /data/kanidm.db
kanidm/server:latest /sbin/kanidmd verify -c /data/server.toml
docker start <container name>
If you have errors, please contact the project to help support you to resolve these.
@ -125,12 +123,11 @@ above.
kanidm raw create -H https://localhost:8443 -C ../insecure/ca.pem -D idm_admin example.create.group.json
# Apply a json stateful modification to all entries matching a filter
kanidm raw modify -H https://localhost:8443 -C ../insecure/ca.pem -D admin '{"Or": [ {"Eq": ["name", "idm_person_account_create_priv"]}, {"Eq": ["name", "idm_service_account_create_priv"]}, {"Eq": ["name", "idm_account_write_priv"]}, {"Eq": ["name", "idm_group_write_priv"]}, {"Eq": ["name", "idm_people_write_priv"]}, {"Eq": ["name", "idm_group_create_priv"]} ]}' example.modify.idm_admin.json
kanidm raw modify -H https://localhost:8443 -C ../insecure/ca.pem -D idm_admin '{"Eq": ["name", "idm_admins"]}' example.modify.idm_admin.json
kanidm raw modify -H https://localhost:8443 -C ../insecure/ca.pem -D admin '{"or": [ {"eq": ["name", "idm_person_account_create_priv"]}, {"eq": ["name", "idm_service_account_create_priv"]}, {"eq": ["name", "idm_account_write_priv"]}, {"eq": ["name", "idm_group_write_priv"]}, {"eq": ["name", "idm_people_write_priv"]}, {"eq": ["name", "idm_group_create_priv"]} ]}' example.modify.idm_admin.json
kanidm raw modify -H https://localhost:8443 -C ../insecure/ca.pem -D idm_admin '{"eq": ["name", "idm_admins"]}' example.modify.idm_admin.json
# Search and show the database representations
kanidm raw search -H https://localhost:8443 -C ../insecure/ca.pem -D admin '{"Eq": ["name", "idm_admin"]}'
> Entry { attrs: {"class": ["account", "memberof", "object"], "displayname": ["IDM Admin"], "memberof": ["idm_people_read_priv", "idm_people_write_priv", "idm_group_write_priv", "idm_account_read_priv", "idm_account_write_priv", "idm_service_account_create_priv", "idm_person_account_create_priv", "idm_high_privilege"], "name": ["idm_admin"], "uuid": ["bb852c38-8920-4932-a551-678253cae6ff"]} }
kanidm raw search -H https://localhost:8443 -C ../insecure/ca.pem -D admin '{"eq": ["name", "idm_admin"]}'
# Delete all entries matching a filter
kanidm raw delete -H https://localhost:8443 -C ../insecure/ca.pem -D idm_admin '{"Eq": ["name", "test_account_delete_me"]}'
kanidm raw delete -H https://localhost:8443 -C ../insecure/ca.pem -D idm_admin '{"eq": ["name", "test_account_delete_me"]}'

View file

@ -14,6 +14,7 @@ Kanidm currently supports:
Using zypper you can add the repository with:
zypper ar obs://home:firstyear:kanidm home_firstyear_kanidm
zypper mr -f home_firstyear_kanidm
Then you need to refresh your metadata and install the clients.
@ -22,13 +23,14 @@ Then you need to referesh your metadata and install the clients.
### Fedora
On fedora you need to add the repos into the correct directory
On Fedora you need to add the repos into the correct directory.
cd /etc/yum.repos.d
30:
wget https://download.opensuse.org/repositories/home:/firstyear:/kanidm/Fedora_30/home:firstyear:kanidm.repo
31:
wget https://download.opensuse.org/repositories/home:/firstyear:/kanidm/Fedora_31/home:firstyear:kanidm.repo
wget https://download.opensuse.org/repositories/home:/firstyear:/kanidm/Fedora_Rawhide/home:firstyear:kanidm.repo
> **NOTICE:**
> While this is a rawhide repository, as kanidm is statically linked, it works correctly on Fedora
> 31 and above.
Now you can add the packages:
@ -39,8 +41,7 @@ Now you can add the packages:
After you check out the source (see github), navigate to:
cd kanidm_tools
cargo build
cargo install --path ./
cargo install --path .
## Check the tools work
@ -50,8 +51,7 @@ with the -C parameter:
kanidm self whoami -C ../path/to/ca.pem -H https://localhost:8443 --name anonymous
kanidm self whoami -H https://localhost:8443 --name anonymous
Now you can take some time to look at what commands are available - things may still be rough so
please ask for help at anytime.
Now you can take some time to look at what commands are available - please ask for help at anytime.
## Kanidm configuration

View file

@ -17,15 +17,33 @@ that mounts the volume such as:
OR for a shell into the volume:
docker run --rm -i -t -v kanidmd:/data opensuse/leap:latest /bin/sh
You will also need a config file in `/data/server.toml`. It's contents should be as follows:
# The webserver bind address. Will use HTTPS if tls_* is provided.
# Defaults to "127.0.0.1:8443"
bindaddress = "127.0.0.1:8443"
# The read-only ldap server bind address. will use LDAPS if tls_* is provided.
# Defaults to "" (disabled)
# ldapbindaddress = "127.0.0.1:3636"
# The path to the kanidm database.
db_path = "/data/kanidm.db"
# TLS ca, certificate and key in pem format. All three must be commented, or present
# tls_ca = "/data/ca.pem"
# tls_cert = "/data/cert.pem"
# tls_key = "/data/key.pem"
# The log level of the server. May be default, verbose, perfbasic, perffull
# Defaults to "default"
# log_level = "default"
Then you can setup the initial admin account and initialise the database into your volume.
docker run --rm -i -t -v kanidmd:/data kanidm/server:latest /sbin/kanidmd recover_account -D /data/kanidm.db -n admin
docker run --rm -i -t -v kanidmd:/data kanidm/server:latest /sbin/kanidmd recover_account -c /data/server.toml -n admin
You then want to set your domain name so that spn's are generated correctly.
docker run --rm -i -t -v kanidmd:/data kanidm/server:latest /sbin/kanidmd domain_name_change -D /data/kanidm.db -n idm.example.com
docker run --rm -i -t -v kanidmd:/data kanidm/server:latest /sbin/kanidmd domain_name_change -c /data/server.toml -n idm.example.com
Now we can run the server so that it can accept connections.
Now we can run the server so that it can accept connections. This defaults to using `-c /data/server.toml`
docker run -p 8443:8443 -v kanidmd:/data kanidm/server:latest

View file

@ -3,9 +3,9 @@
Kanidm is an identity management server, acting as an authority on accounts and authorisation
within a technical environment.
> **WARNING:** This project is still under heavy development, and has not had a production ready
> release yet. It may lose your data, be offline for some periods of time, or otherwise cause
> disruptions if you aren't ready.
> **NOTICE:**
> This is a pre-release project. While all effort has been made to ensure no dataloss
> or security flaws, you should still be careful when using this in your environment.
The intent of the Kanidm project is:

134
kanidm_book/src/ldap.md Normal file
View file

@ -0,0 +1,134 @@
# Legacy Applications -- LDAP
While many applications can support systems like SAML or OAuth, many do not. LDAP
has been the "lingua franca" of authentication for many years, with almost
every application in the world being able to search and bind to LDAP. As there
are still many of these in the world, Kanidm has the ability to host a read-only
LDAP interface.
> **WARNING** The LDAP server in Kanidm is not RFC compliant. This
> is intentional, as Kanidm wants to cover the common use case (simple bind and search).
## What is LDAP
LDAP is a protocol to read data from a directory of information. It is not
a server, but a way to communicate to a server. There are many famous LDAP
implementations such as Active Directory, 389 Directory Server, DSEE,
FreeIPA and many others. Because it is a standard, applications can use
an LDAP client library to authenticate users to LDAP, giving "one account" for
many applications - an IDM just like Kanidm!
## Data Mapping
Kanidm is not able to be mapped 100% to LDAP's objects. This is because LDAP
types are simple key-values on objects which are all UTF8 strings, or subsets
thereof, based on validation (matching) rules. Kanidm internally implements complex
datatypes such as tagging on SSH keys, or multi-value credentials. These can not
be represented in LDAP.
As well many of the structures in Kanidm don't correlate closely to LDAP. For example
Kanidm only has a gidnumber, where LDAP's schema's define uidnumber and gidnumber.
Entries in the database also have a specific name in LDAP, related to their path
in the directory tree. Kanidm is a flat model, so we have to emulate some tree like
elements, and ignore others.
For this reason, when you search the LDAP interface, Kanidm will make some mapping
decisions.
* Queries requesting objectClass/EntryUUID will be mapped to class/uuid
* Entry attributes to LDAP may be renamed for presentation to LDAP clients (ie class to ObjectClass)
* The domain_info object becomes the suffix root.
* All other entries are direct subordinates of the domain_info for DN purposes
* DN's are generated from the attributes naming attributes
* Bind DN's can be remapped and rewritten, and may not even be a DN during bind.
* The Kanidm domain name is used to generate the basedn.
These decisions were made to make the path as simple and effective as possible,
relying more on the kanidm query and filter system than attempting to generate a tree like
representation of data. As almost all clients can use filters for entry selection
we don't believe this is a limitation for consuming applications.
## Security
### TLS
StartTLS is not supported due to security risks. LDAPS is the only secure method
of communicating to any LDAP server. Kanidm if configured with certificates will
use them for LDAPS (and will not listen on a plaintext LDAP port). If no certificates exist
Kanidm will listen on a plaintext LDAP port, and you MUST TLS terminate in front
of the Kanidm system to secure data and authentication.
### Access Controls
LDAP only supports password authentication. As LDAP is used heavily in posix environments
the LDAP bind for any DN will use its configured posix password.
As the posix password is not equivalent in strength to the primary credentials of Kanidm
(which may be MFA), the LDAP bind does not grant rights to elevated read permissions.
All binds, have the permissions of "Anonymous" (even if the anonymous account is locked).
## Server Configuration
To configure Kanidm to provide LDAP you add the argument to the server.toml configuration:
ldapbindaddress = "127.0.0.1:3636"
You should configure TLS certificates and keys as usual - LDAP will re-use the webserver TLS
material.
## Example
Given a default install with domain "example.com" the configured LDAP dn will be "dc=example,dc=com".
This can be queried with:
cargo run -- server -D kanidm.db -C ca.pem -c cert.pem -k key.pem -b 127.0.0.1:8443 -l 127.0.0.1:3636
> LDAPTLS_CACERT=ca.pem ldapsearch -H ldaps://127.0.0.1:3636 -b 'dc=example,dc=com' -x '(name=test1)'
# test1@example.com, example.com
dn: spn=test1@example.com,dc=example,dc=com
objectclass: account
objectclass: memberof
objectclass: object
objectclass: person
displayname: Test User
memberof: spn=group240@example.com,dc=example,dc=com
name: test1
spn: test1@example.com
entryuuid: 22a65b6c-80c8-4e1a-9b76-3f3afdff8400
It is recommended that client applications filter accounts that can login with '(class=account)'
and groups with '(class=group)'. If possible, group membership is defined in rfc2307bis or
Active Directory style. This means groups are determined from the "memberof" attribute which contains
a dn to a group.
LDAP binds can use any unique identifier of the account. The following are all valid bind dn's for
the object listed above (if it was a posix account that is).
ldapwhoami ... -x -D 'name=test1'
ldapwhoami ... -x -D 'spn=test1@example.com'
ldapwhoami ... -x -D 'test1@example.com'
ldapwhoami ... -x -D 'test1'
ldapwhoami ... -x -D '22a65b6c-80c8-4e1a-9b76-3f3afdff8400'
ldapwhoami ... -x -D 'spn=test1@example.com,dc=example,dc=com'
ldapwhoami ... -x -D 'name=test1,dc=example,dc=com'
Most LDAP clients are very picky about TLS, and can be very hard to debug or display errors. For example
these commands:
ldapsearch -H ldaps://127.0.0.1:3636 -b 'dc=example,dc=com' -x '(name=test1)'
ldapsearch -H ldap://127.0.0.1:3636 -b 'dc=example,dc=com' -x '(name=test1)'
ldapsearch -H ldap://127.0.0.1:3389 -b 'dc=example,dc=com' -x '(name=test1)'
All give the same error:
ldap_sasl_bind(SIMPLE): Can't contact LDAP server (-1)
This is despite the fact:
* The first command is a certificate validation error
* The second is a missing ldaps on a TLS port
* The third is an incorrect port
To diagnose errors like this you may need "-d 1" for your ldap commands or client.

View file

@ -14,6 +14,9 @@ can be small, helping to reduce the attack surface of the machine.
We recommend you install the client daemon from your system package manager.
zypper in kanidm-unixd-clients
dnf install kanidm-unixd-clients
You can check the daemon is running on your Linux system with
# systemctl status kanidm_unixd
@ -83,10 +86,10 @@ pam config in a way that will cause you to be unable to authenticate to your mac
To configure PAM on suse you must module four files:
/etc/pam.d/common-account-pc
/etc/pam.d/common-auth-pc
/etc/pam.d/common-password-pc
/etc/pam.d/common-session-pc
/etc/pam.d/common-account
/etc/pam.d/common-auth
/etc/pam.d/common-password
/etc/pam.d/common-session
Each of these controls one of the four stages of pam. The content should look like:
@ -118,6 +121,9 @@ Each of these controls one of the four stages of pam. The content should look li
session optional pam_umask.so
session optional pam_env.so
### Fedora
TBD
## Troubleshooting

View file

@ -48,7 +48,7 @@ This means, Kanidm stores RADIUS credentials in the database is cleartext.
We believe this is a reasonable decision and is a low risk to security as:
* The access controls around radius secret by default are "strong", limited to only self-account read and radius-server read.
* As RADIUS credentials are separate to the primary account credentials, and have no other rights, their disclosure is not going to lead to a fully compromise account.
* As RADIUS credentials are separate to the primary account credentials, and have no other rights, their disclosure is not going to lead to a full compromise of the account.
* Having the credentials in cleartext allows a better user experience as clients can view the credentials at anytime to enroll further devices.
## Account Credential Configuration

View file

@ -49,6 +49,8 @@ If the account has ssh public keys you should see them listed, one per line.
To configure servers to accept these keys, you must change their /etc/ssh/sshd_config to
contain the lines:
PubkeyAuthentication yes
UsePAM yes
AuthorizedKeysCommand /usr/bin/kanidm_ssh_authorizedkeys %u
AuthorizedKeysCommandUser nobody
@ -57,7 +59,24 @@ Restart sshd, and then attempt to authenticate with the keys.
It's highly recommended you keep your client configuration and sshd_configuration in a configuration
management tool such as salt or ansible.
### Direct configuration
> **NOTICE:**
> With a working SSH key setup, you should also consider adding the following
> sshd_config options as hardening.
PermitRootLogin no
PasswordAuthentication no
PermitEmptyPasswords no
GSSAPIAuthentication no
KerberosAuthentication no
### Direct communication configuration
In this mode, the authorised keys commands will contact kanidm directly.
> **NOTICE:**
> As kanidm is contacted directly there is no ssh public key cache. Any network
> outage or communication loss may prevent you accessing your systems. You should
> only use this version if you have a requirement for it.
The kanidm_ssh_authorizedkeys_direct command is part of the kanidm-clients package, so should be installed
on the servers.
@ -73,6 +92,8 @@ If the account has ssh public keys you should see them listed, one per line.
To configure servers to accept these keys, you must change their /etc/ssh/sshd_config to
contain the lines:
PubkeyAuthentication yes
UsePAM yes
AuthorizedKeysCommand /usr/bin/kanidm_ssh_authorizedkeys_direct -D anonymous %u
AuthorizedKeysCommandUser nobody

View file

@ -45,7 +45,7 @@ impl KanidmAsyncClient {
unexpect => return Err(ClientError::Http(unexpect, response.json().await.ok())),
}
// TODO: What about errors
// TODO #253: What about errors
let r: T = response.json().await.unwrap();
Ok(r)
@ -83,7 +83,7 @@ impl KanidmAsyncClient {
unexpect => return Err(ClientError::Http(unexpect, response.json().await.ok())),
}
// TODO: What about errors
// TODO #253: What about errors
let r: T = response.json().await.unwrap();
Ok(r)
@ -111,13 +111,13 @@ impl KanidmAsyncClient {
unexpect => return Err(ClientError::Http(unexpect, response.json().await.ok())),
}
// TODO: What about errors
// TODO #253: What about errors
let r: T = response.json().await.unwrap();
Ok(r)
}
async fn perform_delete_request(&self, dest: &str) -> Result<(), ClientError> {
async fn perform_delete_request(&self, dest: &str) -> Result<bool, ClientError> {
let dest = format!("{}{}", self.addr, dest);
let response = self
.client
@ -137,16 +137,14 @@ impl KanidmAsyncClient {
unexpect => return Err(ClientError::Http(unexpect, response.json().await.ok())),
}
Ok(())
let r: bool = response.json().await.unwrap();
Ok(r)
}
pub async fn auth_step_init(
&self,
ident: &str,
appid: Option<&str>,
) -> Result<AuthState, ClientError> {
pub async fn auth_step_init(&self, ident: &str) -> Result<AuthState, ClientError> {
let auth_init = AuthRequest {
step: AuthStep::Init(ident.to_string(), appid.map(|s| s.to_string())),
step: AuthStep::Init(ident.to_string()),
};
let r: Result<AuthResponse, _> = self.perform_post_request("/v1/auth", auth_init).await;
@ -158,7 +156,7 @@ impl KanidmAsyncClient {
ident: &str,
password: &str,
) -> Result<UserAuthToken, ClientError> {
let _state = match self.auth_step_init(ident, None).await {
let _state = match self.auth_step_init(ident).await {
Ok(s) => s,
Err(e) => return Err(e),
};
@ -180,8 +178,9 @@ impl KanidmAsyncClient {
}
pub async fn auth_anonymous(&self) -> Result<UserAuthToken, ClientError> {
// TODO: Check state for auth continue contains anonymous.
let _state = match self.auth_step_init("anonymous", None).await {
// TODO #251: Check state for auth continue contains anonymous.
// #251 will remove the need for this check.
let _state = match self.auth_step_init("anonymous").await {
Ok(s) => s,
Err(e) => return Err(e),
};
@ -235,17 +234,21 @@ impl KanidmAsyncClient {
.await
}
pub async fn idm_account_delete(&self, id: &str) -> Result<(), ClientError> {
pub async fn idm_account_delete(&self, id: &str) -> Result<bool, ClientError> {
self.perform_delete_request(["/v1/account/", id].concat().as_str())
.await
}
pub async fn idm_group_delete(&self, id: &str) -> Result<(), ClientError> {
pub async fn idm_group_delete(&self, id: &str) -> Result<bool, ClientError> {
self.perform_delete_request(["/v1/group/", id].concat().as_str())
.await
}
pub async fn idm_account_unix_cred_put(&self, id: &str, cred: &str) -> Result<(), ClientError> {
pub async fn idm_account_unix_cred_put(
&self,
id: &str,
cred: &str,
) -> Result<bool, ClientError> {
let req = SingleStringRequest {
value: cred.to_string(),
};
@ -256,7 +259,7 @@ impl KanidmAsyncClient {
.await
}
pub async fn idm_account_unix_cred_delete(&self, id: &str) -> Result<(), ClientError> {
pub async fn idm_account_unix_cred_delete(&self, id: &str) -> Result<bool, ClientError> {
self.perform_delete_request(["/v1/account/", id, "/_unix/_credential"].concat().as_str())
.await
}
@ -277,7 +280,7 @@ impl KanidmAsyncClient {
&self,
id: &str,
members: Vec<&str>,
) -> Result<(), ClientError> {
) -> Result<bool, ClientError> {
let m: Vec<_> = members.iter().map(|v| (*v).to_string()).collect();
self.perform_post_request(["/v1/group/", id, "/_attr/member"].concat().as_str(), m)
.await

View file

@ -73,7 +73,7 @@ impl KanidmClientBuilder {
fn parse_certificate(ca_path: &str) -> Result<reqwest::Certificate, ()> {
let mut buf = Vec::new();
// TODO: Handle these errors better, or at least provide diagnostics?
// TODO #253: Handle these errors better, or at least provide diagnostics?
let mut f = File::open(ca_path).map_err(|_| ())?;
f.read_to_end(&mut buf).map_err(|_| ())?;
reqwest::Certificate::from_pem(&buf).map_err(|_| ())
@ -314,7 +314,7 @@ impl KanidmClient {
unexpect => return Err(ClientError::Http(unexpect, response.json().ok())),
}
// TODO: What about errors
// TODO #253: What about errors
let r: T = response.json().unwrap();
Ok(r)
@ -348,7 +348,7 @@ impl KanidmClient {
unexpect => return Err(ClientError::Http(unexpect, response.json().ok())),
}
// TODO: What about errors
// TODO #253: What about errors
let r: T = response.json().unwrap();
Ok(r)
@ -373,13 +373,13 @@ impl KanidmClient {
unexpect => return Err(ClientError::Http(unexpect, response.json().ok())),
}
// TODO: What about errors
// TODO #253: What about errors
let r: T = response.json().unwrap();
Ok(r)
}
fn perform_delete_request(&self, dest: &str) -> Result<(), ClientError> {
fn perform_delete_request(&self, dest: &str) -> Result<bool, ClientError> {
let dest = format!("{}{}", self.addr, dest);
let response = self
.client
@ -398,7 +398,9 @@ impl KanidmClient {
unexpect => return Err(ClientError::Http(unexpect, response.json().ok())),
}
Ok(())
let r: bool = response.json().unwrap();
Ok(r)
}
// whoami
@ -421,8 +423,8 @@ impl KanidmClient {
// auth
pub fn auth_anonymous(&self) -> Result<UserAuthToken, ClientError> {
// TODO: Check state for auth continue contains anonymous.
let _state = match self.auth_step_init("anonymous", None) {
// TODO #251: Check state for auth continue contains anonymous.
let _state = match self.auth_step_init("anonymous") {
Ok(s) => s,
Err(e) => return Err(e),
};
@ -448,7 +450,7 @@ impl KanidmClient {
ident: &str,
password: &str,
) -> Result<UserAuthToken, ClientError> {
let _state = match self.auth_step_init(ident, None) {
let _state = match self.auth_step_init(ident) {
Ok(s) => s,
Err(e) => return Err(e),
};
@ -475,7 +477,7 @@ impl KanidmClient {
password: &str,
totp: u32,
) -> Result<UserAuthToken, ClientError> {
let _state = match self.auth_step_init(ident, None) {
let _state = match self.auth_step_init(ident) {
Ok(s) => s,
Err(e) => return Err(e),
};
@ -507,42 +509,38 @@ impl KanidmClient {
}
// create
pub fn create(&self, entries: Vec<Entry>) -> Result<(), ClientError> {
pub fn create(&self, entries: Vec<Entry>) -> Result<bool, ClientError> {
let c = CreateRequest { entries };
let r: Result<OperationResponse, _> = self.perform_post_request("/v1/raw/create", c);
r.map(|_| ())
r.map(|_| true)
}
// modify
pub fn modify(&self, filter: Filter, modlist: ModifyList) -> Result<(), ClientError> {
pub fn modify(&self, filter: Filter, modlist: ModifyList) -> Result<bool, ClientError> {
let mr = ModifyRequest { filter, modlist };
let r: Result<OperationResponse, _> = self.perform_post_request("/v1/raw/modify", mr);
r.map(|_| ())
r.map(|_| true)
}
// delete
pub fn delete(&self, filter: Filter) -> Result<(), ClientError> {
pub fn delete(&self, filter: Filter) -> Result<bool, ClientError> {
let dr = DeleteRequest { filter };
let r: Result<OperationResponse, _> = self.perform_post_request("/v1/raw/delete", dr);
r.map(|_| ())
r.map(|_| true)
}
// === idm actions here ==
pub fn idm_account_set_password(&self, cleartext: String) -> Result<(), ClientError> {
pub fn idm_account_set_password(&self, cleartext: String) -> Result<bool, ClientError> {
let s = SingleStringRequest { value: cleartext };
let r: Result<OperationResponse, _> =
self.perform_post_request("/v1/self/_credential/primary/set_password", s);
r.map(|_| ())
r.map(|_| true)
}
pub fn auth_step_init(
&self,
ident: &str,
appid: Option<&str>,
) -> Result<AuthState, ClientError> {
pub fn auth_step_init(&self, ident: &str) -> Result<AuthState, ClientError> {
let auth_init = AuthRequest {
step: AuthStep::Init(ident.to_string(), appid.map(|s| s.to_string())),
step: AuthStep::Init(ident.to_string()),
};
let r: Result<AuthResponse, _> = self.perform_post_request("/v1/auth", auth_init);
@ -562,12 +560,12 @@ impl KanidmClient {
self.perform_get_request(format!("/v1/group/{}/_attr/member", id).as_str())
}
pub fn idm_group_set_members(&self, id: &str, members: Vec<&str>) -> Result<(), ClientError> {
pub fn idm_group_set_members(&self, id: &str, members: Vec<&str>) -> Result<bool, ClientError> {
let m: Vec<_> = members.iter().map(|v| (*v).to_string()).collect();
self.perform_put_request(format!("/v1/group/{}/_attr/member", id).as_str(), m)
}
pub fn idm_group_add_members(&self, id: &str, members: Vec<&str>) -> Result<(), ClientError> {
pub fn idm_group_add_members(&self, id: &str, members: Vec<&str>) -> Result<bool, ClientError> {
let m: Vec<_> = members.iter().map(|v| (*v).to_string()).collect();
self.perform_post_request(format!("/v1/group/{}/_attr/member", id).as_str(), m)
}
@ -578,7 +576,7 @@ impl KanidmClient {
}
*/
pub fn idm_group_purge_members(&self, id: &str) -> Result<(), ClientError> {
pub fn idm_group_purge_members(&self, id: &str) -> Result<bool, ClientError> {
self.perform_delete_request(format!("/v1/group/{}/_attr/member", id).as_str())
}
@ -590,16 +588,16 @@ impl KanidmClient {
&self,
id: &str,
gidnumber: Option<u32>,
) -> Result<(), ClientError> {
) -> Result<bool, ClientError> {
let gx = GroupUnixExtend { gidnumber };
self.perform_post_request(format!("/v1/group/{}/_unix", id).as_str(), gx)
}
pub fn idm_group_delete(&self, id: &str) -> Result<(), ClientError> {
pub fn idm_group_delete(&self, id: &str) -> Result<bool, ClientError> {
self.perform_delete_request(format!("/v1/group/{}", id).as_str())
}
pub fn idm_group_create(&self, name: &str) -> Result<(), ClientError> {
pub fn idm_group_create(&self, name: &str) -> Result<bool, ClientError> {
let mut new_group = Entry {
attrs: BTreeMap::new(),
};
@ -607,7 +605,7 @@ impl KanidmClient {
.attrs
.insert("name".to_string(), vec![name.to_string()]);
self.perform_post_request("/v1/group", new_group)
.map(|_: OperationResponse| ())
.map(|_: OperationResponse| true)
}
// ==== accounts
@ -615,7 +613,7 @@ impl KanidmClient {
self.perform_get_request("/v1/account")
}
pub fn idm_account_create(&self, name: &str, dn: &str) -> Result<(), ClientError> {
pub fn idm_account_create(&self, name: &str, dn: &str) -> Result<bool, ClientError> {
let mut new_acct = Entry {
attrs: BTreeMap::new(),
};
@ -626,17 +624,17 @@ impl KanidmClient {
.attrs
.insert("displayname".to_string(), vec![dn.to_string()]);
self.perform_post_request("/v1/account", new_acct)
.map(|_: OperationResponse| ())
.map(|_: OperationResponse| true)
}
pub fn idm_account_set_displayname(&self, id: &str, dn: &str) -> Result<(), ClientError> {
pub fn idm_account_set_displayname(&self, id: &str, dn: &str) -> Result<bool, ClientError> {
self.perform_put_request(
format!("/v1/account/{}/_attr/displayname", id).as_str(),
vec![dn.to_string()],
)
}
pub fn idm_account_delete(&self, id: &str) -> Result<(), ClientError> {
pub fn idm_account_delete(&self, id: &str) -> Result<bool, ClientError> {
self.perform_delete_request(format!("/v1/account/{}", id).as_str())
}
@ -662,7 +660,7 @@ impl KanidmClient {
&self,
id: &str,
pw: &str,
) -> Result<(), ClientError> {
) -> Result<bool, ClientError> {
self.perform_put_request(
format!("/v1/account/{}/_attr/password_import", id).as_str(),
vec![pw.to_string()],
@ -709,14 +707,14 @@ impl KanidmClient {
id: &str,
otp: u32,
session: Uuid,
) -> Result<(), ClientError> {
) -> Result<bool, ClientError> {
let r = SetCredentialRequest::TOTPVerify(session, otp);
let res: Result<SetCredentialResponse, ClientError> = self.perform_put_request(
format!("/v1/account/{}/_credential/primary", id).as_str(),
r,
);
match res {
Ok(SetCredentialResponse::Success) => Ok(()),
Ok(SetCredentialResponse::Success) => Ok(true),
Ok(SetCredentialResponse::TOTPCheck(u, s)) => Err(ClientError::TOTPVerifyFailed(u, s)),
Ok(_) => Err(ClientError::EmptyResponse),
Err(e) => Err(e),
@ -737,7 +735,7 @@ impl KanidmClient {
self.perform_post_request(format!("/v1/account/{}/_radius", id).as_str(), ())
}
pub fn idm_account_radius_credential_delete(&self, id: &str) -> Result<(), ClientError> {
pub fn idm_account_radius_credential_delete(&self, id: &str) -> Result<bool, ClientError> {
self.perform_delete_request(format!("/v1/account/{}/_radius", id).as_str())
}
@ -750,7 +748,7 @@ impl KanidmClient {
id: &str,
gidnumber: Option<u32>,
shell: Option<&str>,
) -> Result<(), ClientError> {
) -> Result<bool, ClientError> {
let ux = AccountUnixExtend {
shell: shell.map(|s| s.to_string()),
gidnumber,
@ -762,7 +760,7 @@ impl KanidmClient {
self.perform_get_request(format!("/v1/account/{}/_unix/_token", id).as_str())
}
pub fn idm_account_unix_cred_put(&self, id: &str, cred: &str) -> Result<(), ClientError> {
pub fn idm_account_unix_cred_put(&self, id: &str, cred: &str) -> Result<bool, ClientError> {
let req = SingleStringRequest {
value: cred.to_string(),
};
@ -772,7 +770,7 @@ impl KanidmClient {
)
}
pub fn idm_account_unix_cred_delete(&self, id: &str) -> Result<(), ClientError> {
pub fn idm_account_unix_cred_delete(&self, id: &str) -> Result<bool, ClientError> {
self.perform_delete_request(format!("/v1/account/{}/_unix/_credential", id).as_str())
}
@ -796,12 +794,12 @@ impl KanidmClient {
id: &str,
tag: &str,
pubkey: &str,
) -> Result<(), ClientError> {
) -> Result<bool, ClientError> {
let sk = (tag.to_string(), pubkey.to_string());
self.perform_post_request(format!("/v1/account/{}/_ssh_pubkeys", id).as_str(), sk)
}
pub fn idm_account_person_extend(&self, id: &str) -> Result<(), ClientError> {
pub fn idm_account_person_extend(&self, id: &str) -> Result<bool, ClientError> {
self.perform_post_request(format!("/v1/account/{}/_person/_extend", id).as_str(), ())
}
@ -819,7 +817,7 @@ impl KanidmClient {
self.perform_get_request(format!("/v1/account/{}/_ssh_pubkeys/{}", id, tag).as_str())
}
pub fn idm_account_delete_ssh_pubkey(&self, id: &str, tag: &str) -> Result<(), ClientError> {
pub fn idm_account_delete_ssh_pubkey(&self, id: &str, tag: &str) -> Result<bool, ClientError> {
self.perform_delete_request(format!("/v1/account/{}/_ssh_pubkeys/{}", id, tag).as_str())
}
@ -839,7 +837,7 @@ impl KanidmClient {
}
// pub fn idm_domain_put_attr
pub fn idm_domain_set_ssid(&self, id: &str, ssid: &str) -> Result<(), ClientError> {
pub fn idm_domain_set_ssid(&self, id: &str, ssid: &str) -> Result<bool, ClientError> {
self.perform_put_request(
format!("/v1/domain/{}/_attr/domain_ssid", id).as_str(),
vec![ssid.to_string()],

View file

@ -2,6 +2,7 @@ use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc;
use std::thread;
use kanidm::audit::LogLevel;
use kanidm::config::{Configuration, IntegrationTestConfig};
use kanidm::core::create_server_core;
use kanidm_client::{KanidmClient, KanidmClientBuilder};
@ -33,6 +34,7 @@ pub fn run_test(test_fn: fn(KanidmClient) -> ()) {
config.address = format!("127.0.0.1:{}", port);
config.secure_cookies = false;
config.integration_test_config = Some(int_config);
config.log_level = Some(LogLevel::Quiet as u32);
thread::spawn(move || {
// Spawn a thread for the test runner, this should have a unique
// port....

View file

@ -58,10 +58,10 @@ fn create_user(rsclient: &KanidmClient, id: &str, group_name: &str) -> () {
rsclient.idm_account_create(id, "Deeeeemo").unwrap();
// Create group and add to user to test read attr: member_of
match rsclient.idm_group_get(&group_name).unwrap() {
Some(_) => (),
let _ = match rsclient.idm_group_get(&group_name).unwrap() {
Some(_) => true,
None => rsclient.idm_group_create(&group_name).unwrap(),
}
};
rsclient
.idm_group_add_members(&group_name, vec![id])
@ -286,7 +286,7 @@ fn test_default_entries_rbac_account_managers() {
static PRIVATE_DATA_ATTRS: [&str; 1] = ["legalname"];
test_read_attrs(&rsclient, "test", &PRIVATE_DATA_ATTRS, false);
test_write_attrs(&rsclient, "test", &PRIVATE_DATA_ATTRS, false);
// TODO: lock and _unlock, except high access members
// TODO #59: lock and _unlock, except high access members
});
}
@ -381,7 +381,7 @@ fn test_default_entries_rbac_admins_access_control_entries() {
}
// read schema entries.
// TODO: write schema entries
// TODO #252: write schema entries
#[test]
fn test_default_entries_rbac_admins_schema_entries() {
run_test(|rsclient: KanidmClient| {

View file

@ -122,6 +122,13 @@ pub struct Group {
pub uuid: String,
}
impl fmt::Display for Group {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "[ name: {}, ", self.name)?;
write!(f, "uuid: {} ]", self.uuid)
}
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct Claim {
pub name: String,
@ -131,11 +138,13 @@ pub struct Claim {
// pub expiry: DateTime
}
/*
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct Application {
pub name: String,
pub uuid: String,
}
*/
// The currently authenticated user, and any required metadata for them
// to properly authorise them. This is similar in nature to oauth and the krb
@ -157,7 +166,8 @@ pub struct UserAuthToken {
pub spn: String,
pub displayname: String,
pub uuid: String,
pub application: Option<Application>,
// #[serde(skip_serializing_if = "Option::is_none")]
// pub application: Option<Application>,
pub groups: Vec<Group>,
pub claims: Vec<Claim>,
// Should we allow supplemental ava's to be added on request?
@ -191,10 +201,12 @@ pub struct RadiusAuthToken {
impl fmt::Display for RadiusAuthToken {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(f, "name: {}", self.name)?;
writeln!(f, "display: {}", self.displayname)?;
writeln!(f, "displayname: {}", self.displayname)?;
writeln!(f, "uuid: {}", self.uuid)?;
writeln!(f, "secret: {}", self.secret)?;
writeln!(f, "groups: {:?}", self.groups)
self.groups
.iter()
.try_for_each(|g| writeln!(f, "group: {}", g))
}
}
@ -206,6 +218,15 @@ pub struct UnixGroupToken {
pub gidnumber: u32,
}
impl fmt::Display for UnixGroupToken {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "[ spn: {}, ", self.spn)?;
write!(f, "gidnumber: {} ", self.gidnumber)?;
write!(f, "name: {}, ", self.name)?;
write!(f, "uuid: {} ]", self.uuid)
}
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct GroupUnixExtend {
pub gidnumber: Option<u32>,
@ -218,11 +239,32 @@ pub struct UnixUserToken {
pub displayname: String,
pub gidnumber: u32,
pub uuid: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub shell: Option<String>,
pub groups: Vec<UnixGroupToken>,
pub sshkeys: Vec<String>,
}
impl fmt::Display for UnixUserToken {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(f, "---")?;
writeln!(f, "spn: {}", self.name)?;
writeln!(f, "name: {}", self.name)?;
writeln!(f, "displayname: {}", self.displayname)?;
writeln!(f, "uuid: {}", self.uuid)?;
match &self.shell {
Some(s) => writeln!(f, "shell: {}", s)?,
None => writeln!(f, "shell: <none>")?,
}
self.sshkeys
.iter()
.try_for_each(|s| writeln!(f, "ssh_publickey: {}", s))?;
self.groups
.iter()
.try_for_each(|g| writeln!(f, "group: {}", g))
}
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct AccountUnixExtend {
pub gidnumber: Option<u32>,
@ -241,6 +283,15 @@ pub struct Entry {
pub attrs: BTreeMap<String, Vec<String>>,
}
impl fmt::Display for Entry {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(f, "---")?;
self.attrs
.iter()
.try_for_each(|(k, vs)| vs.iter().try_for_each(|v| writeln!(f, "{}: {}", k, v)))
}
}
#[derive(Debug, Serialize, Deserialize, Clone, Ord, PartialOrd, Eq, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum Filter {
@ -378,8 +429,8 @@ impl fmt::Debug for AuthCredential {
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum AuthStep {
// name, application id?
Init(String, Option<String>),
// name
Init(String),
/*
Step(
Type(params ....)

View file

@ -34,14 +34,13 @@ else:
USER = CONFIG.get("kanidm_client", "user")
SECRET = CONFIG.get("kanidm_client", "secret")
DEFAULT_VLAN = CONFIG.get("radiusd", "vlan")
CACHE_PATH = CONFIG.get("radiusd", "cache_path")
TIMEOUT = 8
URL = CONFIG.get('kanidm_client', 'url')
AUTH_URL = "%s/v1/auth" % URL
def _authenticate(s, acct, pw):
init_auth = {"step": { "init": [acct, None]}}
init_auth = {"step": {"init": acct}}
r = s.post(AUTH_URL, json=init_auth, verify=CA, timeout=TIMEOUT)
if r.status_code != 200:
@ -64,36 +63,8 @@ def _get_radius_token(username):
r = s.get(rtok_url, verify=CA, timeout=TIMEOUT)
if r.status_code != 200:
raise Exception("Failed to get RadiusAuthToken")
tok = r.json()
return(tok)
def _update_cache(token):
# Ensure the dir exists
try:
os.makedirs(CACHE_PATH, mode=0o700)
except:
# Already exists
pass
# User Item
item = os.path.join(CACHE_PATH, token["uuid"])
uitem = os.path.join(CACHE_PATH, token["name"])
# Token to json.
with open(item, 'w') as f:
json.dump(token, f)
# Symlink username -> uuid
try:
os.symlink(item, uitem)
except Exception as e:
print(e)
def _get_cache(username):
print("Getting cached token ...")
uitem = os.path.join(CACHE_PATH, username)
try:
with open(uitem, 'r') as f:
return json.load(f)
except:
None
else:
return r.json()
def check_vlan(acc, group):
if CONFIG.has_section("group.%s" % group['name']):
@ -119,12 +90,8 @@ def authorize(args):
try:
tok = _get_radius_token(username)
# Update the cache?
_update_cache(tok)
except Exception as e:
print(e)
# Attempt the cache.
tok = _get_cache(username)
if tok == None:
return radiusd.RLM_MODULE_NOTFOUND

View file

@ -39,7 +39,7 @@ fn main() {
info!("Kanidm badlist preprocessor - this may take a long time ...");
// Build a temp struct for all the pws.
// TODO: Shellexpand all of these.
// Shellexpand all of these.
/*
let expanded_paths: Vec<_> = opt.password_list.iter()
.map(|p| {

View file

@ -12,8 +12,6 @@ pub struct AccountCommonOpt {
pub struct AccountCredentialSet {
#[structopt(flatten)]
aopts: AccountCommonOpt,
#[structopt()]
application_id: Option<String>,
#[structopt(flatten)]
copt: CommonOpt,
}
@ -226,7 +224,7 @@ impl AccountOpt {
let token = client
.idm_account_unix_token_get(aopt.aopts.account_id.as_str())
.unwrap();
println!("{:?}", token);
println!("{}", token);
}
AccountPosix::Set(aopt) => {
let client = aopt.copt.to_client();
@ -292,7 +290,7 @@ impl AccountOpt {
let client = copt.to_client();
let r = client.idm_account_list().unwrap();
for e in r {
println!("{:?}", e);
println!("{}", e);
}
}
AccountOpt::Get(aopt) => {
@ -300,7 +298,10 @@ impl AccountOpt {
let e = client
.idm_account_get(aopt.aopts.account_id.as_str())
.unwrap();
println!("{:?}", e);
match e {
Some(e) => println!("{}", e),
None => println!("No matching entries"),
}
}
AccountOpt::Delete(aopt) => {
let client = aopt.copt.to_client();

View file

@ -72,7 +72,7 @@ impl GroupOpt {
let client = copt.to_client();
let r = client.idm_group_list().unwrap();
for e in r {
println!("{:?}", e);
println!("{}", e);
}
}
GroupOpt::Create(gcopt) => {
@ -118,7 +118,7 @@ impl GroupOpt {
let token = client
.idm_group_unix_token_get(gcopt.name.as_str())
.unwrap();
println!("{:?}", token);
println!("{}", token);
}
GroupPosix::Set(gcopt) => {
let client = gcopt.copt.to_client();

View file

@ -76,7 +76,7 @@ impl RawOpt {
let rset = client.search(filter).unwrap();
rset.iter().for_each(|e| {
println!("{:?}", e);
println!("{}", e);
});
}
RawOpt::Create(copt) => {
@ -86,7 +86,7 @@ impl RawOpt {
Some(p) => {
let r_entries: Vec<BTreeMap<String, Vec<String>>> = read_file(p).unwrap();
let entries = r_entries.into_iter().map(|b| Entry { attrs: b }).collect();
client.create(entries).unwrap()
client.create(entries).unwrap();
}
None => {
println!("Must provide a file");
@ -101,7 +101,7 @@ impl RawOpt {
let filter: Filter = serde_json::from_str(mopt.filter.as_str()).unwrap();
let r_list: Vec<Modify> = read_file(p).unwrap();
let modlist = ModifyList::new_list(r_list);
client.modify(filter, modlist).unwrap()
client.modify(filter, modlist).unwrap();
}
None => {
println!("Must provide a file");

View file

@ -29,13 +29,16 @@ impl RecycleOpt {
let client = copt.to_client();
let r = client.recycle_bin_list().unwrap();
for e in r {
println!("{:?}", e);
println!("{}", e);
}
}
RecycleOpt::Get(nopt) => {
let client = nopt.copt.to_client();
let e = client.recycle_bin_get(nopt.name.as_str()).unwrap();
println!("{:?}", e);
match e {
Some(e) => println!("{}", e),
None => println!("No matching entries"),
}
}
RecycleOpt::Revive(nopt) => {
let client = nopt.copt.to_client();

View file

@ -1,3 +1,4 @@
#![deny(warnings)]
#[macro_use]
extern crate libnss;
#[macro_use]
@ -11,8 +12,6 @@ use libnss::group::{Group, GroupHooks};
use libnss::interop::Response;
use libnss::passwd::{Passwd, PasswdHooks};
use libc;
struct KanidmPasswd;
libnss_passwd_hooks!(kanidm, KanidmPasswd);
@ -27,7 +26,7 @@ impl PasswdHooks for KanidmPasswd {
ClientResponse::NssAccounts(l) => l.into_iter().map(passwd_from_nssuser).collect(),
_ => Vec::new(),
})
.map(|v| Response::Success(v))
.map(Response::Success)
.unwrap_or_else(|_| Response::Success(vec![]))
}
@ -40,7 +39,7 @@ impl PasswdHooks for KanidmPasswd {
.map(|r| match r {
ClientResponse::NssAccount(opt) => opt
.map(passwd_from_nssuser)
.map(|p| Response::Success(p))
.map(Response::Success)
.unwrap_or_else(|| Response::NotFound),
_ => Response::NotFound,
})
@ -56,7 +55,7 @@ impl PasswdHooks for KanidmPasswd {
.map(|r| match r {
ClientResponse::NssAccount(opt) => opt
.map(passwd_from_nssuser)
.map(|p| Response::Success(p))
.map(Response::Success)
.unwrap_or_else(|| Response::NotFound),
_ => Response::NotFound,
})
@ -78,7 +77,7 @@ impl GroupHooks for KanidmGroup {
ClientResponse::NssGroups(l) => l.into_iter().map(group_from_nssgroup).collect(),
_ => Vec::new(),
})
.map(|v| Response::Success(v))
.map(Response::Success)
.unwrap_or_else(|_| Response::Success(vec![]))
}
@ -91,7 +90,7 @@ impl GroupHooks for KanidmGroup {
.map(|r| match r {
ClientResponse::NssGroup(opt) => opt
.map(group_from_nssgroup)
.map(|p| Response::Success(p))
.map(Response::Success)
.unwrap_or_else(|| Response::NotFound),
_ => Response::NotFound,
})
@ -107,7 +106,7 @@ impl GroupHooks for KanidmGroup {
.map(|r| match r {
ClientResponse::NssGroup(opt) => opt
.map(group_from_nssgroup)
.map(|p| Response::Success(p))
.map(Response::Success)
.unwrap_or_else(|| Response::NotFound),
_ => Response::NotFound,
})

View file

@ -1,3 +1,4 @@
#![deny(warnings)]
extern crate libc;
mod pam;

View file

@ -1,7 +1,5 @@
use libc::{c_int, c_uint};
// TODO: Import constants from C header file at compile time.
pub type PamFlag = c_uint;
pub type PamItemType = c_int;
pub type PamMessageStyle = c_int;

View file

@ -18,7 +18,7 @@
/// pam_hooks!(MyPamModule);
///
/// impl PamHooks for MyPamModule {
/// fn sm_authenticate(pamh: &PamHandle, args: Vec<&CStr>, flags: PamFlag) -> PamResultCode {
/// fn sm_authenticate(pamh: &PamHandle, args: Vec<&CStr>, flags: PamFlag) -> PamResultCode {
/// println!("Everybody is authenticated!");
/// PamResultCode::PAM_SUCCESS
/// }

View file

@ -86,11 +86,11 @@ impl PamHandle {
/// See `pam_get_data` in
/// http://www.linux-pam.org/Linux-PAM-html/mwg-expected-by-module-item.html
pub unsafe fn get_data<'a, T>(&'a self, key: &str) -> PamResult<&'a T> {
let c_key = CString::new(key).unwrap().as_ptr();
let c_key = CString::new(key).unwrap();
let mut ptr: *const PamDataT = ptr::null();
let res = pam_get_data(self, c_key, &mut ptr);
let res = pam_get_data(self, c_key.as_ptr(), &mut ptr);
if PamResultCode::PAM_SUCCESS == res && !ptr.is_null() {
let typed_ptr: *const T = mem::transmute(ptr);
let typed_ptr: *const T = ptr as *const T;
let data: &T = &*typed_ptr;
Ok(data)
} else {
@ -104,11 +104,11 @@ impl PamHandle {
/// See `pam_set_data` in
/// http://www.linux-pam.org/Linux-PAM-html/mwg-expected-by-module-item.html
pub fn set_data<T>(&self, key: &str, data: Box<T>) -> PamResult<()> {
let c_key = CString::new(key).unwrap().as_ptr();
let c_key = CString::new(key).unwrap();
let res = unsafe {
let c_data: Box<PamDataT> = mem::transmute(data);
let c_data = Box::into_raw(c_data);
pam_set_data(self, c_key, c_data, cleanup::<T>)
pam_set_data(self, c_key.as_ptr(), c_data, cleanup::<T>)
};
if PamResultCode::PAM_SUCCESS == res {
Ok(())
@ -126,7 +126,7 @@ impl PamHandle {
let mut ptr: *const PamItemT = ptr::null();
let (res, item) = unsafe {
let r = pam_get_item(self, T::item_type(), &mut ptr);
let typed_ptr: *const T = mem::transmute(ptr);
let typed_ptr: *const T = ptr as *const T;
let t: &T = &*typed_ptr;
(r, t)
};
@ -145,7 +145,7 @@ impl PamHandle {
/// See `pam_set_item` in
/// http://www.linux-pam.org/Linux-PAM-html/mwg-expected-by-module-item.html
pub fn set_item_str<T: PamItem>(&mut self, item: &str) -> PamResult<()> {
let c_item = CString::new(item).unwrap().as_ptr();
let c_item = CString::new(item).unwrap();
let res = unsafe {
pam_set_item(
@ -153,7 +153,7 @@ impl PamHandle {
T::item_type(),
// unwrapping is okay here, as c_item will not be a NULL
// pointer
(c_item as *const PamItemT).as_ref().unwrap(),
(c_item.as_ptr() as *const PamItemT).as_ref().unwrap(),
)
};
if PamResultCode::PAM_SUCCESS == res {
@ -171,11 +171,14 @@ impl PamHandle {
/// http://www.linux-pam.org/Linux-PAM-html/mwg-expected-by-module-item.html
pub fn get_user(&self, prompt: Option<&str>) -> PamResult<String> {
let ptr: *mut c_char = ptr::null_mut();
let c_prompt = match prompt {
Some(p) => CString::new(p).unwrap().as_ptr(),
None => ptr::null(),
let res = match prompt {
Some(p) => {
let c_prompt = CString::new(p).unwrap();
unsafe { pam_get_user(self, &ptr, c_prompt.as_ptr()) }
}
None => unsafe { pam_get_user(self, &ptr, ptr::null()) },
};
let res = unsafe { pam_get_user(self, &ptr, c_prompt) };
if PamResultCode::PAM_SUCCESS == res && !ptr.is_null() {
let const_ptr = ptr as *const c_char;
let bytes = unsafe { CStr::from_ptr(const_ptr).to_bytes() };
@ -190,7 +193,7 @@ impl PamHandle {
let (res, item) = unsafe {
let r = pam_get_item(self, PAM_AUTHTOK, &mut ptr);
let t = if PamResultCode::PAM_SUCCESS == r && !ptr.is_null() {
let typed_ptr: *const c_char = mem::transmute(ptr);
let typed_ptr: *const c_char = ptr as *const c_char;
Some(CStr::from_ptr(typed_ptr).to_string_lossy().into_owned())
} else {
None

View file

@ -62,10 +62,10 @@ impl CacheLayer {
// We assume we are offline at start up, and we mark the next "online check" as
// being valid from "now".
Ok(CacheLayer {
db: db,
client: client,
db,
client,
state: Mutex::new(CacheState::OfflineNextCheck(SystemTime::now())),
timeout_seconds: timeout_seconds,
timeout_seconds,
pam_allow_groups: pam_allow_groups.into_iter().collect(),
})
}
@ -171,7 +171,6 @@ impl CacheLayer {
.duration_since(SystemTime::UNIX_EPOCH)
.map_err(|e| {
error!("time conversion error - ex_time less than epoch? {:?}", e);
()
})?;
let dbtxn = self.db.write();
@ -194,7 +193,6 @@ impl CacheLayer {
.duration_since(SystemTime::UNIX_EPOCH)
.map_err(|e| {
error!("time conversion error - ex_time less than epoch? {:?}", e);
()
})?;
let dbtxn = self.db.write();
@ -358,7 +356,6 @@ impl CacheLayer {
// get the item from the cache
let (expired, item) = self.get_cached_usertoken(&account_id).map_err(|e| {
debug!("get_usertoken error -> {:?}", e);
()
})?;
let state = self.get_cachestate().await;
@ -406,7 +403,6 @@ impl CacheLayer {
debug!("get_grouptoken");
let (expired, item) = self.get_cached_grouptoken(&grp_id).map_err(|e| {
debug!("get_grouptoken error -> {:?}", e);
()
})?;
let state = self.get_cachestate().await;
@ -458,7 +454,7 @@ impl CacheLayer {
.unwrap_or_else(|_| Vec::new())
.into_iter()
.map(|ut| {
// TODO: We'll have a switch to convert this to spn in some configs
// TODO #181: We'll have a switch to convert this to spn in some configs
// in the future.
ut.name
})
@ -468,7 +464,7 @@ impl CacheLayer {
// Get ssh keys for an account id
pub async fn get_sshkeys(&self, account_id: &str) -> Result<Vec<String>, ()> {
let token = self.get_usertoken(Id::Name(account_id.to_string())).await?;
Ok(token.map(|t| t.sshkeys).unwrap_or_else(|| Vec::new()))
Ok(token.map(|t| t.sshkeys).unwrap_or_else(Vec::new))
}
pub fn get_nssaccounts(&self) -> Result<Vec<NssUser>, ()> {
@ -480,7 +476,7 @@ impl CacheLayer {
name: tok.name,
gid: tok.gidnumber,
gecos: tok.displayname,
// TODO: default shell override.
// TODO #254: default shell override.
shell: tok.shell.unwrap_or_else(|| "/bin/bash".to_string()),
}
})
@ -496,7 +492,7 @@ impl CacheLayer {
name: tok.name,
gid: tok.gidnumber,
gecos: tok.displayname,
// TODO: default shell override.
// TODO #254: default shell override.
shell: tok.shell.unwrap_or_else(|| "/bin/bash".to_string()),
}
}))
@ -518,7 +514,7 @@ impl CacheLayer {
NssGroup {
name: tok.name,
gid: tok.gidnumber,
members: members,
members,
}
})
.collect()
@ -533,7 +529,7 @@ impl CacheLayer {
NssGroup {
name: tok.name,
gid: tok.gidnumber,
members: members,
members,
}
}))
}
@ -639,8 +635,7 @@ impl CacheLayer {
user_set, self.pam_allow_groups
);
let b = user_set.intersection(&self.pam_allow_groups).count() > 0;
b
user_set.intersection(&self.pam_allow_groups).count() > 0
}))
}

View file

@ -1,4 +1,4 @@
pub const DEFAULT_SOCK_PATH: &'static str = "/var/run/kanidm-unixd/sock";
pub const DEFAULT_DB_PATH: &'static str = "/var/cache/kanidm-unixd/kanidm.cache.db";
pub const DEFAULT_SOCK_PATH: &str = "/var/run/kanidm-unixd/sock";
pub const DEFAULT_DB_PATH: &str = "/var/cache/kanidm-unixd/kanidm.cache.db";
pub const DEFAULT_CONN_TIMEOUT: u64 = 2;
pub const DEFAULT_CACHE_TIMEOUT: u64 = 15;

View file

@ -1,3 +1,4 @@
#![deny(warnings)]
#[macro_use]
extern crate log;
@ -61,7 +62,6 @@ impl ClientCodec {
fn rm_if_exist(p: &str) {
let _ = std::fs::remove_file(p).map_err(|e| {
error!("attempting to remove {:?} -> {:?}", p, e);
()
});
}
@ -80,7 +80,7 @@ async fn handle_client(
cachelayer
.get_sshkeys(account_id.as_str())
.await
.map(|r| ClientResponse::SshKeys(r))
.map(ClientResponse::SshKeys)
.unwrap_or_else(|_| {
error!("unable to load keys, returning empty set.");
ClientResponse::SshKeys(vec![])
@ -90,7 +90,7 @@ async fn handle_client(
debug!("nssaccounts req");
cachelayer
.get_nssaccounts()
.map(|r| ClientResponse::NssAccounts(r))
.map(ClientResponse::NssAccounts)
.unwrap_or_else(|_| {
error!("unable to enum accounts");
ClientResponse::NssAccounts(Vec::new())
@ -101,7 +101,7 @@ async fn handle_client(
cachelayer
.get_nssaccount_gid(gid)
.await
.map(|acc| ClientResponse::NssAccount(acc))
.map(ClientResponse::NssAccount)
.unwrap_or_else(|_| {
error!("unable to load account, returning empty.");
ClientResponse::NssAccount(None)
@ -112,7 +112,7 @@ async fn handle_client(
cachelayer
.get_nssaccount_name(account_id.as_str())
.await
.map(|acc| ClientResponse::NssAccount(acc))
.map(ClientResponse::NssAccount)
.unwrap_or_else(|_| {
error!("unable to load account, returning empty.");
ClientResponse::NssAccount(None)
@ -122,7 +122,7 @@ async fn handle_client(
debug!("nssgroups req");
cachelayer
.get_nssgroups()
.map(|r| ClientResponse::NssGroups(r))
.map(ClientResponse::NssGroups)
.unwrap_or_else(|_| {
error!("unable to enum groups");
ClientResponse::NssGroups(Vec::new())
@ -133,7 +133,7 @@ async fn handle_client(
cachelayer
.get_nssgroup_gid(gid)
.await
.map(|grp| ClientResponse::NssGroup(grp))
.map(ClientResponse::NssGroup)
.unwrap_or_else(|_| {
error!("unable to load group, returning empty.");
ClientResponse::NssGroup(None)
@ -144,7 +144,7 @@ async fn handle_client(
cachelayer
.get_nssgroup_name(grp_id.as_str())
.await
.map(|grp| ClientResponse::NssGroup(grp))
.map(ClientResponse::NssGroup)
.unwrap_or_else(|_| {
error!("unable to load group, returning empty.");
ClientResponse::NssGroup(None)
@ -155,7 +155,7 @@ async fn handle_client(
cachelayer
.pam_account_authenticate(account_id.as_str(), cred.as_str())
.await
.map(|r| ClientResponse::PamStatus(r))
.map(ClientResponse::PamStatus)
.unwrap_or(ClientResponse::Error)
}
ClientRequest::PamAccountAllowed(account_id) => {
@ -163,7 +163,7 @@ async fn handle_client(
cachelayer
.pam_account_allowed(account_id.as_str())
.await
.map(|r| ClientResponse::PamStatus(r))
.map(ClientResponse::PamStatus)
.unwrap_or(ClientResponse::Error)
}
ClientRequest::InvalidateCache => {

View file

@ -30,11 +30,10 @@ impl Db {
let builder1 = Pool::builder().max_size(1);
let pool = builder1.build(manager).map_err(|e| {
error!("r2d2 error {:?}", e);
()
})?;
Ok(Db {
pool: pool,
pool,
lock: Mutex::new(()),
})
}
@ -91,7 +90,6 @@ impl<'a> DbTxn<'a> {
)
.map_err(|e| {
error!("sqlite account_t create error -> {:?}", e);
()
})?;
self.conn
@ -109,7 +107,6 @@ impl<'a> DbTxn<'a> {
)
.map_err(|e| {
error!("sqlite group_t create error -> {:?}", e);
()
})?;
self.conn
@ -125,7 +122,6 @@ impl<'a> DbTxn<'a> {
)
.map_err(|e| {
error!("sqlite memberof_t create error -> {:?}", e);
()
})?;
Ok(())
@ -141,7 +137,6 @@ impl<'a> DbTxn<'a> {
.map(|_| ())
.map_err(|e| {
error!("sqlite commit failure -> {:?}", e);
()
})
}
@ -150,14 +145,12 @@ impl<'a> DbTxn<'a> {
.execute("UPDATE group_t SET expiry = 0", NO_PARAMS)
.map_err(|e| {
error!("sqlite update group_t failure -> {:?}", e);
()
})?;
self.conn
.execute("UPDATE account_t SET expiry = 0", NO_PARAMS)
.map_err(|e| {
error!("sqlite update account_t failure -> {:?}", e);
()
})?;
Ok(())
@ -168,14 +161,12 @@ impl<'a> DbTxn<'a> {
.execute("DELETE FROM group_t", NO_PARAMS)
.map_err(|e| {
error!("sqlite delete group_t failure -> {:?}", e);
()
})?;
self.conn
.execute("DELETE FROM account_t", NO_PARAMS)
.map_err(|e| {
error!("sqlite delete group_t failure -> {:?}", e);
()
})?;
Ok(())
@ -188,7 +179,6 @@ impl<'a> DbTxn<'a> {
)
.map_err(|e| {
error!("sqlite select prepare failure -> {:?}", e);
()
})?;
// Makes tuple (token, expiry)
@ -196,26 +186,23 @@ impl<'a> DbTxn<'a> {
.query_map(&[account_id], |row| Ok((row.get(0)?, row.get(1)?)))
.map_err(|e| {
error!("sqlite query_map failure -> {:?}", e);
()
})?;
let data: Result<Vec<(Vec<u8>, i64)>, _> = data_iter
.map(|v| {
v.map_err(|e| {
error!("sqlite map failure -> {:?}", e);
()
})
})
.collect();
data
}
fn get_account_data_gid(&self, gid: &u32) -> Result<Vec<(Vec<u8>, i64)>, ()> {
fn get_account_data_gid(&self, gid: u32) -> Result<Vec<(Vec<u8>, i64)>, ()> {
let mut stmt = self
.conn
.prepare("SELECT token, expiry FROM account_t WHERE gidnumber = :gid")
.map_err(|e| {
error!("sqlite select prepare failure -> {:?}", e);
()
})?;
// Makes tuple (token, expiry)
@ -223,13 +210,11 @@ impl<'a> DbTxn<'a> {
.query_map(&[gid], |row| Ok((row.get(0)?, row.get(1)?)))
.map_err(|e| {
error!("sqlite query_map failure -> {:?}", e);
()
})?;
let data: Result<Vec<(Vec<u8>, i64)>, _> = data_iter
.map(|v| {
v.map_err(|e| {
error!("sqlite map failure -> {:?}", e);
()
})
})
.collect();
@ -239,7 +224,7 @@ impl<'a> DbTxn<'a> {
pub fn get_account(&self, account_id: &Id) -> Result<Option<(UnixUserToken, u64)>, ()> {
let data = match account_id {
Id::Name(n) => self.get_account_data_name(n.as_str()),
Id::Gid(g) => self.get_account_data_gid(g),
Id::Gid(g) => self.get_account_data_gid(*g),
}?;
// Assert only one result?
@ -254,11 +239,9 @@ impl<'a> DbTxn<'a> {
// token convert with cbor.
let t = serde_cbor::from_slice(token.as_slice()).map_err(|e| {
error!("cbor error -> {:?}", e);
()
})?;
let e = u64::try_from(*expiry).map_err(|e| {
error!("u64 convert error -> {:?}", e);
()
})?;
Ok((t, e))
})
@ -272,20 +255,17 @@ impl<'a> DbTxn<'a> {
.prepare("SELECT token FROM account_t")
.map_err(|e| {
error!("sqlite select prepare failure -> {:?}", e);
()
})?;
let data_iter = stmt
.query_map(NO_PARAMS, |row| Ok(row.get(0)?))
.map_err(|e| {
error!("sqlite query_map failure -> {:?}", e);
()
})?;
let data: Result<Vec<Vec<u8>>, _> = data_iter
.map(|v| {
v.map_err(|e| {
error!("sqlite map failure -> {:?}", e);
()
})
})
.collect();
@ -297,7 +277,6 @@ impl<'a> DbTxn<'a> {
// token convert with cbor.
serde_cbor::from_slice(token.as_slice()).map_err(|e| {
error!("cbor error -> {:?}", e);
()
})
})
.collect()
@ -306,11 +285,9 @@ impl<'a> DbTxn<'a> {
pub fn update_account(&self, account: &UnixUserToken, expire: u64) -> Result<(), ()> {
let data = serde_cbor::to_vec(account).map_err(|e| {
error!("cbor error -> {:?}", e);
()
})?;
let expire = i64::try_from(expire).map_err(|e| {
error!("i64 convert error -> {:?}", e);
()
})?;
// This is needed because sqlites 'insert or replace into', will null the password field
@ -328,7 +305,6 @@ impl<'a> DbTxn<'a> {
)
.map_err(|e| {
debug!("sqlite delete account_t duplicate failure -> {:?}", e);
()
})
.map(|_| ())?;
@ -345,7 +321,6 @@ impl<'a> DbTxn<'a> {
)
.map_err(|e| {
debug!("sqlite delete account_t duplicate failure -> {:?}", e);
()
})
.map(|c| c)?;
@ -354,7 +329,6 @@ impl<'a> DbTxn<'a> {
.prepare("INSERT INTO account_t (uuid, name, spn, gidnumber, token, expiry) VALUES (:uuid, :name, :spn, :gidnumber, :token, :expiry) ON CONFLICT(uuid) DO UPDATE SET name=excluded.name, spn=excluded.name, gidnumber=excluded.gidnumber, token=excluded.token, expiry=excluded.expiry")
.map_err(|e| {
error!("sqlite prepare error -> {:?}", e);
()
})?;
stmt.execute_named(&[
@ -367,11 +341,9 @@ impl<'a> DbTxn<'a> {
])
.map(|r| {
debug!("insert -> {:?}", r);
()
})
.map_err(|e| {
error!("sqlite execute_named error -> {:?}", e);
()
})?;
}
@ -383,16 +355,13 @@ impl<'a> DbTxn<'a> {
.prepare("DELETE FROM memberof_t WHERE a_uuid = :a_uuid")
.map_err(|e| {
error!("sqlite prepare error -> {:?}", e);
()
})?;
stmt.execute(&[&account.uuid])
.map(|r| {
debug!("delete memberships -> {:?}", r);
()
})
.map_err(|e| {
error!("sqlite execute error -> {:?}", e);
()
})?;
let mut stmt = self
@ -400,18 +369,15 @@ impl<'a> DbTxn<'a> {
.prepare("INSERT INTO memberof_t (a_uuid, g_uuid) VALUES (:a_uuid, :g_uuid)")
.map_err(|e| {
error!("sqlite prepare error -> {:?}", e);
()
})?;
// Now for each group, add the relation.
account.groups.iter().try_for_each(|g| {
stmt.execute_named(&[(":a_uuid", &account.uuid), (":g_uuid", &g.uuid)])
.map(|r| {
debug!("insert membership -> {:?}", r);
()
})
.map_err(|e| {
error!("sqlite execute_named error -> {:?}", e);
()
})
})
}
@ -422,7 +388,6 @@ impl<'a> DbTxn<'a> {
.map(|_| ())
.map_err(|e| {
error!("sqlite memberof_t create error -> {:?}", e);
()
})
}
@ -431,7 +396,6 @@ impl<'a> DbTxn<'a> {
let dbpw = pw.to_dbpasswordv1();
let data = serde_cbor::to_vec(&dbpw).map_err(|e| {
error!("cbor error -> {:?}", e);
()
})?;
self.conn
@ -441,7 +405,6 @@ impl<'a> DbTxn<'a> {
)
.map_err(|e| {
error!("sqlite update account_t password failure -> {:?}", e);
()
})
.map(|_| ())
}
@ -452,7 +415,6 @@ impl<'a> DbTxn<'a> {
.prepare("SELECT password FROM account_t WHERE uuid = :a_uuid AND password IS NOT NULL")
.map_err(|e| {
error!("sqlite select prepare failure -> {:?}", e);
()
})?;
// Makes tuple (token, expiry)
@ -460,20 +422,18 @@ impl<'a> DbTxn<'a> {
.query_map(&[a_uuid], |row| Ok(row.get(0)?))
.map_err(|e| {
error!("sqlite query_map failure -> {:?}", e);
()
})?;
let data: Result<Vec<Vec<u8>>, _> = data_iter
.map(|v| {
v.map_err(|e| {
error!("sqlite map failure -> {:?}", e);
()
})
})
.collect();
let data = data?;
if data.len() == 0 {
if data.is_empty() {
info!("No cached password, failing authentication");
return Ok(false);
}
@ -489,7 +449,6 @@ impl<'a> DbTxn<'a> {
// Map the option from data.first.
let dbpw: DbPasswordV1 = serde_cbor::from_slice(raw.as_slice()).map_err(|e| {
error!("cbor error -> {:?}", e);
()
})?;
let pw = Password::try_from(dbpw)?;
Ok(pw.verify(cred))
@ -505,7 +464,6 @@ impl<'a> DbTxn<'a> {
)
.map_err(|e| {
error!("sqlite select prepare failure -> {:?}", e);
()
})?;
// Makes tuple (token, expiry)
@ -513,26 +471,23 @@ impl<'a> DbTxn<'a> {
.query_map(&[grp_id], |row| Ok((row.get(0)?, row.get(1)?)))
.map_err(|e| {
error!("sqlite query_map failure -> {:?}", e);
()
})?;
let data: Result<Vec<(Vec<u8>, i64)>, _> = data_iter
.map(|v| {
v.map_err(|e| {
error!("sqlite map failure -> {:?}", e);
()
})
})
.collect();
data
}
fn get_group_data_gid(&self, gid: &u32) -> Result<Vec<(Vec<u8>, i64)>, ()> {
fn get_group_data_gid(&self, gid: u32) -> Result<Vec<(Vec<u8>, i64)>, ()> {
let mut stmt = self
.conn
.prepare("SELECT token, expiry FROM group_t WHERE gidnumber = :gid")
.map_err(|e| {
error!("sqlite select prepare failure -> {:?}", e);
()
})?;
// Makes tuple (token, expiry)
@ -540,13 +495,11 @@ impl<'a> DbTxn<'a> {
.query_map(&[gid], |row| Ok((row.get(0)?, row.get(1)?)))
.map_err(|e| {
error!("sqlite query_map failure -> {:?}", e);
()
})?;
let data: Result<Vec<(Vec<u8>, i64)>, _> = data_iter
.map(|v| {
v.map_err(|e| {
error!("sqlite map failure -> {:?}", e);
()
})
})
.collect();
@ -556,7 +509,7 @@ impl<'a> DbTxn<'a> {
pub fn get_group(&self, grp_id: &Id) -> Result<Option<(UnixGroupToken, u64)>, ()> {
let data = match grp_id {
Id::Name(n) => self.get_group_data_name(n.as_str()),
Id::Gid(g) => self.get_group_data_gid(g),
Id::Gid(g) => self.get_group_data_gid(*g),
}?;
// Assert only one result?
@ -571,11 +524,9 @@ impl<'a> DbTxn<'a> {
// token convert with cbor.
let t = serde_cbor::from_slice(token.as_slice()).map_err(|e| {
error!("cbor error -> {:?}", e);
()
})?;
let e = u64::try_from(*expiry).map_err(|e| {
error!("u64 convert error -> {:?}", e);
()
})?;
Ok((t, e))
})
@ -589,20 +540,17 @@ impl<'a> DbTxn<'a> {
.prepare("SELECT account_t.token FROM (account_t, memberof_t) WHERE account_t.uuid = memberof_t.a_uuid AND memberof_t.g_uuid = :g_uuid")
.map_err(|e| {
error!("sqlite select prepare failure -> {:?}", e);
()
})?;
let data_iter = stmt
.query_map(&[g_uuid], |row| Ok(row.get(0)?))
.map_err(|e| {
error!("sqlite query_map failure -> {:?}", e);
()
})?;
let data: Result<Vec<Vec<u8>>, _> = data_iter
.map(|v| {
v.map_err(|e| {
error!("sqlite map failure -> {:?}", e);
()
})
})
.collect();
@ -615,7 +563,6 @@ impl<'a> DbTxn<'a> {
// debug!("{:?}", token);
serde_cbor::from_slice(token.as_slice()).map_err(|e| {
error!("cbor error -> {:?}", e);
()
})
})
.collect()
@ -627,20 +574,17 @@ impl<'a> DbTxn<'a> {
.prepare("SELECT token FROM group_t")
.map_err(|e| {
error!("sqlite select prepare failure -> {:?}", e);
()
})?;
let data_iter = stmt
.query_map(NO_PARAMS, |row| Ok(row.get(0)?))
.map_err(|e| {
error!("sqlite query_map failure -> {:?}", e);
()
})?;
let data: Result<Vec<Vec<u8>>, _> = data_iter
.map(|v| {
v.map_err(|e| {
error!("sqlite map failure -> {:?}", e);
()
})
})
.collect();
@ -653,7 +597,6 @@ impl<'a> DbTxn<'a> {
// debug!("{:?}", token);
serde_cbor::from_slice(token.as_slice()).map_err(|e| {
error!("cbor error -> {:?}", e);
()
})
})
.collect()
@ -662,18 +605,15 @@ impl<'a> DbTxn<'a> {
pub fn update_group(&self, grp: &UnixGroupToken, expire: u64) -> Result<(), ()> {
let data = serde_cbor::to_vec(grp).map_err(|e| {
error!("cbor error -> {:?}", e);
()
})?;
let expire = i64::try_from(expire).map_err(|e| {
error!("i64 convert error -> {:?}", e);
()
})?;
let mut stmt = self.conn
.prepare("INSERT OR REPLACE INTO group_t (uuid, name, spn, gidnumber, token, expiry) VALUES (:uuid, :name, :spn, :gidnumber, :token, :expiry)")
.map_err(|e| {
error!("sqlite prepare error -> {:?}", e);
()
})?;
stmt.execute_named(&[
@ -686,11 +626,9 @@ impl<'a> DbTxn<'a> {
])
.map(|r| {
debug!("insert -> {:?}", r);
()
})
.map_err(|e| {
error!("sqlite execute_named error -> {:?}", e);
()
})
}
@ -700,7 +638,6 @@ impl<'a> DbTxn<'a> {
.map(|_| ())
.map_err(|e| {
error!("sqlite memberof_t create error -> {:?}", e);
()
})
}
}

View file

@ -1,4 +1,4 @@
// #![deny(warnings)]
#![deny(warnings)]
#![warn(unused_extern_crates)]
#[macro_use]

View file

@ -1,3 +1,4 @@
#![deny(warnings)]
#[macro_use]
extern crate log;
@ -34,7 +35,7 @@ async fn main() {
.read_options_from_optional_config("/etc/kanidm/unixd")
.expect("Failed to parse /etc/kanidm/unixd");
let req = ClientRequest::SshKey(opt.account_id.clone());
let req = ClientRequest::SshKey(opt.account_id);
match block_on(call_daemon(cfg.sock_path.as_str(), req)) {
Ok(r) => match r {

View file

@ -1,3 +1,4 @@
#![deny(warnings)]
#[macro_use]
extern crate log;
@ -37,7 +38,7 @@ async fn main() {
let password = rpassword::prompt_password_stderr("Enter unix password: ").unwrap();
let req = ClientRequest::PamAuthenticate(opt.account_id.clone(), password);
let sereq = ClientRequest::PamAccountAllowed(opt.account_id.clone());
let sereq = ClientRequest::PamAccountAllowed(opt.account_id);
match block_on(call_daemon(cfg.sock_path.as_str(), req)) {
Ok(r) => match r {

View file

@ -24,6 +24,12 @@ pub struct KanidmUnixdConfig {
pub pam_allowed_login_groups: Vec<String>,
}
impl Default for KanidmUnixdConfig {
fn default() -> Self {
KanidmUnixdConfig::new()
}
}
impl KanidmUnixdConfig {
pub fn new() -> Self {
KanidmUnixdConfig {

View file

@ -3,6 +3,7 @@ use std::sync::mpsc;
use std::thread;
use actix::prelude::*;
use kanidm::audit::LogLevel;
use kanidm::config::{Configuration, IntegrationTestConfig};
use kanidm::core::create_server_core;
@ -33,6 +34,7 @@ fn run_test(fix_fn: fn(&KanidmClient) -> (), test_fn: fn(CacheLayer, KanidmAsync
config.address = format!("127.0.0.1:{}", port);
config.secure_cookies = false;
config.integration_test_config = Some(int_config);
config.log_level = Some(LogLevel::Quiet as u32);
thread::spawn(move || {
// Spawn a thread for the test runner, this should have a unique
// port....

View file

@ -39,6 +39,7 @@ actix-files = "0.2"
log = "0.4"
env_logger = "0.7"
rand = "0.7"
toml = "0.5"
chrono = "0.4"
cookie = "0.13"

View file

@ -1,4 +1,4 @@
ARG BASE_IMAGE=opensuse/tumbleweed@sha256:211becaf2c9250ae36eb313abf5db38cd5e159cac2d920e1ef7bb62429a51585
ARG BASE_IMAGE=opensuse/tumbleweed:latest
FROM ${BASE_IMAGE} AS builder
LABEL mantainer william@blackhats.net.au
@ -46,6 +46,6 @@ EXPOSE 8443 3636
VOLUME /data
ENV RUST_BACKTRACE 1
CMD ["/sbin/kanidmd", "server", "-D", "/data/kanidm.db", "-C", "/data/ca.pem", \
"-c", "/data/cert.pem", "-k", "/data/key.pem", "--bindaddr", "0.0.0.0:8443"]
CMD ["/sbin/kanidmd", "server", "-c", "/data/server.toml"]

7
kanidmd/server.toml Normal file
View file

@ -0,0 +1,7 @@
bindaddress = "127.0.0.1:8443"
ldapbindaddress = "127.0.0.1:3636"
db_path = "/tmp/kanidm.db"
tls_ca = "../insecure/ca.pem"
tls_cert = "../insecure/cert.pem"
tls_key = "../insecure/key.pem"
# log_level = "default"

View file

@ -45,7 +45,6 @@ lazy_static! {
#[derive(Debug, Clone)]
pub struct AccessControlSearch {
acp: AccessControlProfile,
// TODO: Should this change to Value? May help to reduce transformations during processing.
attrs: BTreeSet<String>,
}
@ -62,12 +61,10 @@ impl AccessControlSearch {
));
}
let attrs = try_audit!(
audit,
value.get_ava_set_string("acp_search_attr").ok_or_else(|| {
OperationError::InvalidACPState("Missing acp_search_attr".to_string())
})
);
let attrs = value.get_ava_set_string("acp_search_attr").ok_or_else(|| {
ladmin_error!(audit, "Missing acp_search_attr");
OperationError::InvalidACPState("Missing acp_search_attr".to_string())
})?;
let acp = AccessControlProfile::try_from(audit, qs, value)?;
@ -283,47 +280,48 @@ impl AccessControlProfile {
}
// copy name
let name = try_audit!(
audit,
value
.get_ava_single_str("name")
.ok_or_else(|| OperationError::InvalidACPState("Missing name".to_string()))
)
.to_string();
let name = value
.get_ava_single_str("name")
.ok_or_else(|| {
ladmin_error!(audit, "Missing name");
OperationError::InvalidACPState("Missing name".to_string())
})?
.to_string();
// copy uuid
let uuid = *value.get_uuid();
// receiver, and turn to real filter
let receiver_f: ProtoFilter = try_audit!(
audit,
value
.get_ava_single_protofilter("acp_receiver")
.ok_or_else(|| OperationError::InvalidACPState("Missing acp_receiver".to_string()))
);
let receiver_f: ProtoFilter = value
.get_ava_single_protofilter("acp_receiver")
.ok_or_else(|| {
ladmin_error!(audit, "Missing acp_receiver");
OperationError::InvalidACPState("Missing acp_receiver".to_string())
})?;
// targetscope, and turn to real filter
let targetscope_f: ProtoFilter = try_audit!(
audit,
value
.get_ava_single_protofilter("acp_targetscope")
.ok_or_else(|| OperationError::InvalidACPState(
"Missing acp_targetscope".to_string()
))
);
let targetscope_f: ProtoFilter = value
.get_ava_single_protofilter("acp_targetscope")
.ok_or_else(|| {
ladmin_error!(audit, "Missing acp_targetscope");
OperationError::InvalidACPState("Missing acp_targetscope".to_string())
})?;
let receiver_i = try_audit!(audit, Filter::from_rw(audit, &receiver_f, qs));
let receiver = try_audit!(
audit,
receiver_i
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)
);
let receiver_i = Filter::from_rw(audit, &receiver_f, qs).map_err(|e| {
ladmin_error!(audit, "Receiver validation failed {:?}", e);
e
})?;
let receiver = receiver_i.validate(qs.get_schema()).map_err(|e| {
ladmin_error!(audit, "acp_receiver Schema Violation {:?}", e);
OperationError::SchemaViolation(e)
})?;
let targetscope_i = try_audit!(audit, Filter::from_rw(audit, &targetscope_f, qs));
let targetscope = try_audit!(
audit,
targetscope_i
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)
);
let targetscope_i = Filter::from_rw(audit, &targetscope_f, qs).map_err(|e| {
ladmin_error!(audit, "Targetscope validation failed {:?}", e);
e
})?;
let targetscope = targetscope_i.validate(qs.get_schema()).map_err(|e| {
ladmin_error!(audit, "acp_targetscope Schema Violation {:?}", e);
OperationError::SchemaViolation(e)
})?;
Ok(AccessControlProfile {
name,
@ -359,19 +357,17 @@ pub trait AccessControlsTransaction {
se: &SearchEvent,
entries: Vec<Entry<EntrySealed, EntryCommitted>>,
) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> {
// If this is an internal search, return our working set.
let rec_entry: &Entry<EntrySealed, EntryCommitted> = match &se.event.origin {
EventOrigin::Internal => {
ltrace!(audit, "Internal operation, bypassing access check");
// No need to check ACS
return Ok(entries);
}
EventOrigin::User(e) => &e,
};
lperf_segment!(audit, "access::search_filter_entries", || {
lsecurity_access!(audit, "Access check for event: {:?}", se);
// If this is an internal search, return our working set.
let rec_entry: &Entry<EntrySealed, EntryCommitted> = match &se.event.origin {
EventOrigin::Internal => {
lsecurity_access!(audit, "Internal operation, bypassing access check");
// No need to check ACS
return Ok(entries);
}
EventOrigin::User(e) => &e,
};
// Some useful references we'll use for the remainder of the operation
let search_state = self.get_search();
@ -443,9 +439,7 @@ pub trait AccessControlsTransaction {
acs
);
// add search_attrs to allowed.
let r: Vec<&str> =
acs.attrs.iter().map(|s| s.as_str()).collect();
Some(r)
Some(acs.attrs.iter().map(|s| s.as_str()))
} else {
lsecurity_access!(
audit,
@ -483,10 +477,10 @@ pub trait AccessControlsTransaction {
})
.collect();
if allowed_entries.len() > 0 {
lsecurity_access!(audit, "allowed {} entries ✅", allowed_entries.len());
} else {
if allowed_entries.is_empty() {
lsecurity_access!(audit, "denied ❌");
} else {
lsecurity_access!(audit, "allowed {} entries ✅", allowed_entries.len());
}
Ok(allowed_entries)
@ -499,6 +493,26 @@ pub trait AccessControlsTransaction {
se: &SearchEvent,
entries: Vec<Entry<EntrySealed, EntryCommitted>>,
) -> Result<Vec<Entry<EntryReduced, EntryCommitted>>, OperationError> {
// If this is an internal search, do nothing. This can occur in some test cases ONLY
let rec_entry: &Entry<EntrySealed, EntryCommitted> = match &se.event.origin {
EventOrigin::Internal => {
if cfg!(test) {
ltrace!(audit, "TEST: Internal search in external interface - allowing due to cfg test ...");
// In tests we just push everything back.
return Ok(entries
.into_iter()
.map(|e| unsafe { e.into_reduced() })
.collect());
} else {
// In production we can't risk leaking data here, so we return
// empty sets.
lsecurity_critical!(audit, "IMPOSSIBLE STATE: Internal search in external interface?! Returning empty for safety.");
// No need to check ACS
return Ok(Vec::new());
}
}
EventOrigin::User(e) => &e,
};
lperf_segment!(audit, "access::search_filter_entry_attributes", || {
/*
* Super similar to above (could even re-use some parts). Given a set of entries,
@ -509,28 +523,6 @@ pub trait AccessControlsTransaction {
*/
lsecurity_access!(audit, "Access check and reduce for event: {:?}", se);
// If this is an internal search, do nothing. How this occurs in this
// interface is beyond me ....
let rec_entry: &Entry<EntrySealed, EntryCommitted> = match &se.event.origin {
EventOrigin::Internal => {
if cfg!(test) {
lsecurity_access!(audit, "TEST: Internal search in external interface - allowing due to cfg test ...");
// In tests we just push everything back.
return Ok(entries
.into_iter()
.map(|e| unsafe { e.into_reduced() })
.collect());
} else {
// In production we can't risk leaking data here, so we return
// empty sets.
ladmin_error!(audit, "IMPOSSIBLE STATE: Internal search in external interface?! Returning empty for safety.");
// No need to check ACS
return Ok(Vec::new());
}
}
EventOrigin::User(e) => &e,
};
// Some useful references we'll use for the remainder of the operation
let search_state = self.get_search();
@ -607,9 +599,12 @@ pub trait AccessControlsTransaction {
acs
);
// add search_attrs to allowed.
/*
let r: Vec<&str> =
acs.attrs.iter().map(|s| s.as_str()).collect();
Some(r)
*/
Some(acs.attrs.iter().map(|s| s.as_str()))
} else {
lsecurity_access!(
audit,
@ -649,11 +644,16 @@ pub trait AccessControlsTransaction {
})
.collect();
lsecurity_access!(
audit,
"attribute set reduced on {} entries",
allowed_entries.len()
);
if allowed_entries.is_empty() {
lsecurity_access!(audit, "reduced to empty set on all entries ❌");
} else {
lsecurity_access!(
audit,
"attribute set reduced on {} entries ✅",
allowed_entries.len()
);
}
Ok(allowed_entries)
})
}
@ -664,18 +664,17 @@ pub trait AccessControlsTransaction {
me: &ModifyEvent,
entries: &[Entry<EntrySealed, EntryCommitted>],
) -> Result<bool, OperationError> {
let rec_entry: &Entry<EntrySealed, EntryCommitted> = match &me.event.origin {
EventOrigin::Internal => {
ltrace!(audit, "Internal operation, bypassing access check");
// No need to check ACS
return Ok(true);
}
EventOrigin::User(e) => &e,
};
lperf_segment!(audit, "access::modify_allow_operation", || {
lsecurity_access!(audit, "Access check for event: {:?}", me);
let rec_entry: &Entry<EntrySealed, EntryCommitted> = match &me.event.origin {
EventOrigin::Internal => {
lsecurity_access!(audit, "Internal operation, bypassing access check");
// No need to check ACS
return Ok(true);
}
EventOrigin::User(e) => &e,
};
// Some useful references we'll use for the remainder of the operation
let modify_state = self.get_modify();
@ -831,18 +830,15 @@ pub trait AccessControlsTransaction {
// Now check all the subsets are true. Remember, purge class
// is already checked above.
let mut result = true;
if !requested_pres.is_subset(&allowed_pres) {
lsecurity_access!(audit, "requested_pres is not a subset of allowed");
lsecurity_access!(audit, "{:?} !⊆ {:?}", requested_pres, allowed_pres);
result = false;
}
if !requested_rem.is_subset(&allowed_rem) {
false
} else if !requested_rem.is_subset(&allowed_rem) {
lsecurity_access!(audit, "requested_rem is not a subset of allowed");
lsecurity_access!(audit, "{:?} !⊆ {:?}", requested_rem, allowed_rem);
result = false;
}
if !requested_classes.is_subset(&allowed_classes) {
false
} else if !requested_classes.is_subset(&allowed_classes) {
lsecurity_access!(audit, "requested_classes is not a subset of allowed");
lsecurity_access!(
audit,
@ -850,10 +846,11 @@ pub trait AccessControlsTransaction {
requested_classes,
allowed_classes
);
result = false;
false
} else {
lsecurity_access!(audit, "passed pres, rem, classes check.");
true
}
lsecurity_access!(audit, "passed pres, rem, classes check.");
result
} // if acc == false
});
if r {
@ -871,18 +868,17 @@ pub trait AccessControlsTransaction {
ce: &CreateEvent,
entries: &[Entry<EntryInit, EntryNew>],
) -> Result<bool, OperationError> {
let rec_entry: &Entry<EntrySealed, EntryCommitted> = match &ce.event.origin {
EventOrigin::Internal => {
ltrace!(audit, "Internal operation, bypassing access check");
// No need to check ACS
return Ok(true);
}
EventOrigin::User(e) => &e,
};
lperf_segment!(audit, "access::create_allow_operation", || {
lsecurity_access!(audit, "Access check for event: {:?}", ce);
let rec_entry: &Entry<EntrySealed, EntryCommitted> = match &ce.event.origin {
EventOrigin::Internal => {
lsecurity_access!(audit, "Internal operation, bypassing access check");
// No need to check ACS
return Ok(true);
}
EventOrigin::User(e) => &e,
};
// Some useful references we'll use for the remainder of the operation
let create_state = self.get_create();
@ -1045,18 +1041,17 @@ pub trait AccessControlsTransaction {
de: &DeleteEvent,
entries: &[Entry<EntrySealed, EntryCommitted>],
) -> Result<bool, OperationError> {
let rec_entry: &Entry<EntrySealed, EntryCommitted> = match &de.event.origin {
EventOrigin::Internal => {
ltrace!(audit, "Internal operation, bypassing access check");
// No need to check ACS
return Ok(true);
}
EventOrigin::User(e) => &e,
};
lperf_segment!(audit, "access::delete_allow_operation", || {
lsecurity_access!(audit, "Access check for event: {:?}", de);
let rec_entry: &Entry<EntrySealed, EntryCommitted> = match &de.event.origin {
EventOrigin::Internal => {
lsecurity_access!(audit, "Internal operation, bypassing access check");
// No need to check ACS
return Ok(true);
}
EventOrigin::User(e) => &e,
};
// Some useful references we'll use for the remainder of the operation
let delete_state = self.get_delete();
@ -1368,8 +1363,6 @@ mod tests {
audit,
&mut qs_write,
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["object"],
"name": ["acp_invalid"],
@ -1383,8 +1376,6 @@ mod tests {
audit,
&mut qs_write,
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["object", "access_control_profile"],
"name": ["acp_invalid"],
@ -1398,8 +1389,6 @@ mod tests {
audit,
&mut qs_write,
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["object", "access_control_profile"],
"name": ["acp_invalid"],
@ -1416,8 +1405,6 @@ mod tests {
audit,
&mut qs_write,
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["object", "access_control_profile"],
"name": ["acp_valid"],
@ -1444,8 +1431,6 @@ mod tests {
audit,
&mut qs_write,
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["object", "access_control_profile"],
"name": ["acp_valid"],
@ -1465,8 +1450,6 @@ mod tests {
audit,
&mut qs_write,
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["object", "access_control_profile", "access_control_delete"],
"name": ["acp_valid"],
@ -1495,8 +1478,6 @@ mod tests {
audit,
&mut qs_write,
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["object", "access_control_search"],
"name": ["acp_invalid"],
@ -1518,8 +1499,6 @@ mod tests {
audit,
&mut qs_write,
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["object", "access_control_profile"],
"name": ["acp_invalid"],
@ -1541,8 +1520,6 @@ mod tests {
audit,
&mut qs_write,
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["object", "access_control_profile", "access_control_search"],
"name": ["acp_invalid"],
@ -1563,8 +1540,6 @@ mod tests {
audit,
&mut qs_write,
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["object", "access_control_profile", "access_control_search"],
"name": ["acp_valid"],
@ -1593,8 +1568,6 @@ mod tests {
audit,
&mut qs_write,
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["object", "access_control_profile"],
"name": ["acp_valid"],
@ -1617,8 +1590,6 @@ mod tests {
audit,
&mut qs_write,
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["object", "access_control_profile", "access_control_modify"],
"name": ["acp_valid"],
@ -1638,8 +1609,6 @@ mod tests {
audit,
&mut qs_write,
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["object", "access_control_profile", "access_control_modify"],
"name": ["acp_valid"],
@ -1670,8 +1639,6 @@ mod tests {
audit,
&mut qs_write,
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["object", "access_control_profile"],
"name": ["acp_valid"],
@ -1693,8 +1660,6 @@ mod tests {
audit,
&mut qs_write,
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["object", "access_control_profile", "access_control_create"],
"name": ["acp_valid"],
@ -1714,8 +1679,6 @@ mod tests {
audit,
&mut qs_write,
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["object", "access_control_profile", "access_control_create"],
"name": ["acp_valid"],
@ -1745,8 +1708,6 @@ mod tests {
let mut qs_write = qs.write(duration_from_epoch_now());
let e: &str = r#"{
"valid": null,
"state": null,
"attrs": {
"class": [
"object",
@ -1792,10 +1753,11 @@ mod tests {
acw.update_search($controls).expect("Failed to update");
let acw = acw;
let mut audit = AuditScope::new("test_acp_search", uuid::Uuid::new_v4());
let mut audit = AuditScope::new("test_acp_search", uuid::Uuid::new_v4(), None);
let res = acw
.search_filter_entries(&mut audit, $se, $entries)
.expect("op failed");
audit.write_log();
debug!("result --> {:?}", res);
debug!("expect --> {:?}", $expect);
// should be ok, and same as expect.
@ -1810,8 +1772,6 @@ mod tests {
let e1: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["object"],
"name": ["testperson1"],
@ -1895,7 +1855,7 @@ mod tests {
acw.update_search($controls).expect("Failed to update");
let acw = acw;
let mut audit = AuditScope::new("test_acp_search_reduce", uuid::Uuid::new_v4());
let mut audit = AuditScope::new("test_acp_search_reduce", uuid::Uuid::new_v4(), None);
// We still have to reduce the entries to be sure that we are good.
let res = acw
.search_filter_entries(&mut audit, $se, $entries)
@ -1911,6 +1871,8 @@ mod tests {
.map(|e| unsafe { e.into_reduced() })
.collect();
audit.write_log();
debug!("expect --> {:?}", expect_set);
debug!("result --> {:?}", reduced);
// should be ok, and same as expect.
@ -1919,8 +1881,6 @@ mod tests {
}
const JSON_TESTPERSON1_REDUCED: &'static str = r#"{
"valid": null,
"state": null,
"attrs": {
"name": ["testperson1"]
}
@ -2018,10 +1978,12 @@ mod tests {
acw.update_modify($controls).expect("Failed to update");
let acw = acw;
let mut audit = AuditScope::new("test_acp_modify", uuid::Uuid::new_v4());
let mut audit = AuditScope::new("test_acp_modify", uuid::Uuid::new_v4(), None);
let res = acw
.modify_allow_operation(&mut audit, $me, $entries)
.expect("op failed");
audit.write_log();
debug!("result --> {:?}", res);
debug!("expect --> {:?}", $expect);
// should be ok, and same as expect.
@ -2180,10 +2142,12 @@ mod tests {
acw.update_create($controls).expect("Failed to update");
let acw = acw;
let mut audit = AuditScope::new("test_acp_create", uuid::Uuid::new_v4());
let mut audit = AuditScope::new("test_acp_create", uuid::Uuid::new_v4(), None);
let res = acw
.create_allow_operation(&mut audit, $ce, $entries)
.expect("op failed");
audit.write_log();
debug!("result --> {:?}", res);
debug!("expect --> {:?}", $expect);
// should be ok, and same as expect.
@ -2192,8 +2156,6 @@ mod tests {
}
const JSON_TEST_CREATE_AC1: &'static str = r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["account"],
"name": ["testperson1"],
@ -2202,8 +2164,6 @@ mod tests {
}"#;
const JSON_TEST_CREATE_AC2: &'static str = r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["account"],
"name": ["testperson1"],
@ -2213,8 +2173,6 @@ mod tests {
}"#;
const JSON_TEST_CREATE_AC3: &'static str = r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["account", "notallowed"],
"name": ["testperson1"],
@ -2223,8 +2181,6 @@ mod tests {
}"#;
const JSON_TEST_CREATE_AC4: &'static str = r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["account", "group"],
"name": ["testperson1"],
@ -2306,10 +2262,12 @@ mod tests {
acw.update_delete($controls).expect("Failed to update");
let acw = acw;
let mut audit = AuditScope::new("test_acp_delete", uuid::Uuid::new_v4());
let mut audit = AuditScope::new("test_acp_delete", uuid::Uuid::new_v4(), None);
let res = acw
.delete_allow_operation(&mut audit, $de, $entries)
.expect("op failed");
audit.write_log();
debug!("result --> {:?}", res);
debug!("expect --> {:?}", $expect);
// should be ok, and same as expect.

View file

@ -185,6 +185,7 @@ impl Message for IdmAccountUnixAuthMessage {
pub struct QueryServerReadV1 {
log: Sender<Option<AuditScope>>,
log_level: Option<u32>,
qs: QueryServer,
idms: Arc<IdmServer>,
ldap: Arc<LdapServer>,
@ -201,6 +202,7 @@ impl Actor for QueryServerReadV1 {
impl QueryServerReadV1 {
pub fn new(
log: Sender<Option<AuditScope>>,
log_level: Option<u32>,
qs: QueryServer,
idms: Arc<IdmServer>,
ldap: Arc<LdapServer>,
@ -208,6 +210,7 @@ impl QueryServerReadV1 {
info!("Starting query server v1 worker ...");
QueryServerReadV1 {
log,
log_level,
qs,
idms,
ldap,
@ -216,6 +219,7 @@ impl QueryServerReadV1 {
pub fn start(
log: Sender<Option<AuditScope>>,
log_level: Option<u32>,
query_server: QueryServer,
idms: Arc<IdmServer>,
ldap: Arc<LdapServer>,
@ -224,6 +228,7 @@ impl QueryServerReadV1 {
SyncArbiter::start(threads, move || {
QueryServerReadV1::new(
log.clone(),
log_level,
query_server.clone(),
idms.clone(),
ldap.clone(),
@ -241,8 +246,8 @@ impl Handler<SearchMessage> for QueryServerReadV1 {
type Result = Result<SearchResponse, OperationError>;
fn handle(&mut self, msg: SearchMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("search", msg.eventid.clone());
let res = lperf_segment!(&mut audit, "actors::v1_read::handle<SearchMessage>", || {
let mut audit = AuditScope::new("search", msg.eventid, self.log_level);
let res = lperf_op_segment!(&mut audit, "actors::v1_read::handle<SearchMessage>", || {
// Begin a read
let mut qs_read = self.qs.read();
@ -280,8 +285,8 @@ impl Handler<AuthMessage> for QueryServerReadV1 {
// "on top" of the db server concept. In this case we check if
// the credentials provided is sufficient to say if someone is
// "authenticated" or not.
let mut audit = AuditScope::new("auth", msg.eventid.clone());
let res = lperf_segment!(&mut audit, "actors::v1_read::handle<AuthMessage>", || {
let mut audit = AuditScope::new("auth", msg.eventid, self.log_level);
let res = lperf_op_segment!(&mut audit, "actors::v1_read::handle<AuthMessage>", || {
lsecurity!(audit, "Begin auth event {:?}", msg);
// Destructure it.
@ -290,7 +295,10 @@ impl Handler<AuthMessage> for QueryServerReadV1 {
let mut idm_write = self.idms.write();
let ae = try_audit!(audit, AuthEvent::from_message(msg));
let ae = AuthEvent::from_message(msg).map_err(|e| {
ladmin_error!(audit, "Failed to parse AuthEvent -> {:?}", e);
e
})?;
let ct = SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
@ -299,7 +307,7 @@ impl Handler<AuthMessage> for QueryServerReadV1 {
// Trigger a session clean *before* we take any auth steps.
// It's important to do this before to ensure that timeouts on
// the session are enforced.
lperf_segment!(
lperf_trace_segment!(
audit,
"actors::v1_read::handle<AuthMessage> -> expire_auth_sessions",
|| { idm_write.expire_auth_sessions(ct) }
@ -328,8 +336,8 @@ impl Handler<WhoamiMessage> for QueryServerReadV1 {
type Result = Result<WhoamiResponse, OperationError>;
fn handle(&mut self, msg: WhoamiMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("whoami", msg.eventid.clone());
let res = lperf_segment!(&mut audit, "actors::v1_read::handle<WhoamiMessage>", || {
let mut audit = AuditScope::new("whoami", msg.eventid, self.log_level);
let res = lperf_op_segment!(&mut audit, "actors::v1_read::handle<WhoamiMessage>", || {
// TODO #62: Move this to IdmServer!!!
// Begin a read
let mut qs_read = self.qs.read();
@ -386,8 +394,8 @@ impl Handler<InternalSearchMessage> for QueryServerReadV1 {
type Result = Result<Vec<ProtoEntry>, OperationError>;
fn handle(&mut self, msg: InternalSearchMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("internal_search_message", msg.eventid.clone());
let res = lperf_segment!(
let mut audit = AuditScope::new("internal_search_message", msg.eventid, self.log_level);
let res = lperf_op_segment!(
&mut audit,
"actors::v1_read::handle<InternalSearchMessage>",
|| {
@ -427,8 +435,12 @@ impl Handler<InternalSearchRecycledMessage> for QueryServerReadV1 {
msg: InternalSearchRecycledMessage,
_: &mut Self::Context,
) -> Self::Result {
let mut audit = AuditScope::new("internal_search_recycle_message", msg.eventid.clone());
let res = lperf_segment!(
let mut audit = AuditScope::new(
"internal_search_recycle_message",
msg.eventid,
self.log_level,
);
let res = lperf_op_segment!(
&mut audit,
"actors::v1_read::handle<InternalSearchRecycledMessage>",
|| {
@ -466,8 +478,9 @@ impl Handler<InternalRadiusReadMessage> for QueryServerReadV1 {
type Result = Result<Option<String>, OperationError>;
fn handle(&mut self, msg: InternalRadiusReadMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("internal_radius_read_message", msg.eventid.clone());
let res = lperf_segment!(
let mut audit =
AuditScope::new("internal_radius_read_message", msg.eventid, self.log_level);
let res = lperf_op_segment!(
&mut audit,
"actors::v1_read::handle<InternalRadiusReadMessage>",
|| {
@ -528,8 +541,12 @@ impl Handler<InternalRadiusTokenReadMessage> for QueryServerReadV1 {
msg: InternalRadiusTokenReadMessage,
_: &mut Self::Context,
) -> Self::Result {
let mut audit = AuditScope::new("internal_radius_token_read_message", msg.eventid.clone());
let res = lperf_segment!(
let mut audit = AuditScope::new(
"internal_radius_token_read_message",
msg.eventid,
self.log_level,
);
let res = lperf_op_segment!(
&mut audit,
"actors::v1_read::handle<InternalRadiusTokenReadMessage>",
|| {
@ -578,8 +595,12 @@ impl Handler<InternalUnixUserTokenReadMessage> for QueryServerReadV1 {
msg: InternalUnixUserTokenReadMessage,
_: &mut Self::Context,
) -> Self::Result {
let mut audit = AuditScope::new("internal_unix_token_read_message", msg.eventid.clone());
let res = lperf_segment!(
let mut audit = AuditScope::new(
"internal_unix_token_read_message",
msg.eventid,
self.log_level,
);
let res = lperf_op_segment!(
&mut audit,
"actors::v1_read::handle<InternalUnixUserTokenReadMessage>",
|| {
@ -630,9 +651,12 @@ impl Handler<InternalUnixGroupTokenReadMessage> for QueryServerReadV1 {
msg: InternalUnixGroupTokenReadMessage,
_: &mut Self::Context,
) -> Self::Result {
let mut audit =
AuditScope::new("internal_unixgroup_token_read_message", msg.eventid.clone());
let res = lperf_segment!(
let mut audit = AuditScope::new(
"internal_unixgroup_token_read_message",
msg.eventid,
self.log_level,
);
let res = lperf_op_segment!(
&mut audit,
"actors::v1_read::handle<InternalUnixGroupTokenReadMessage>",
|| {
@ -679,8 +703,9 @@ impl Handler<InternalSshKeyReadMessage> for QueryServerReadV1 {
type Result = Result<Vec<String>, OperationError>;
fn handle(&mut self, msg: InternalSshKeyReadMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("internal_sshkey_read_message", msg.eventid.clone());
let res = lperf_segment!(
let mut audit =
AuditScope::new("internal_sshkey_read_message", msg.eventid, self.log_level);
let res = lperf_op_segment!(
&mut audit,
"actors::v1_read::handle<InternalSshKeyReadMessage>",
|| {
@ -746,8 +771,9 @@ impl Handler<InternalSshKeyTagReadMessage> for QueryServerReadV1 {
tag,
eventid,
} = msg;
let mut audit = AuditScope::new("internal_sshkey_tag_read_message", eventid);
let res = lperf_segment!(
let mut audit =
AuditScope::new("internal_sshkey_tag_read_message", eventid, self.log_level);
let res = lperf_op_segment!(
&mut audit,
"actors::v1_read::handle<InternalSshKeyTagReadMessage>",
|| {
@ -813,8 +839,8 @@ impl Handler<IdmAccountUnixAuthMessage> for QueryServerReadV1 {
type Result = Result<Option<UnixUserToken>, OperationError>;
fn handle(&mut self, msg: IdmAccountUnixAuthMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("idm_account_unix_auth", msg.eventid.clone());
let res = lperf_segment!(
let mut audit = AuditScope::new("idm_account_unix_auth", msg.eventid, self.log_level);
let res = lperf_op_segment!(
&mut audit,
"actors::v1_read::handle<IdmAccountUnixAuthMessage>",
|| {
@ -884,8 +910,8 @@ impl Handler<LdapRequestMessage> for QueryServerReadV1 {
protomsg,
uat,
} = msg;
let mut audit = AuditScope::new("ldap_request_message", eventid.clone());
let res = lperf_segment!(
let mut audit = AuditScope::new("ldap_request_message", eventid, self.log_level);
let res = lperf_op_segment!(
&mut audit,
"actors::v1_read::handle<LdapRequestMessage>",
|| {

View file

@ -307,6 +307,7 @@ impl Message for SetAttributeMessage {
pub struct QueryServerWriteV1 {
log: Sender<Option<AuditScope>>,
log_level: Option<u32>,
qs: QueryServer,
idms: Arc<IdmServer>,
}
@ -322,18 +323,29 @@ impl Actor for QueryServerWriteV1 {
}
impl QueryServerWriteV1 {
pub fn new(log: Sender<Option<AuditScope>>, qs: QueryServer, idms: Arc<IdmServer>) -> Self {
pub fn new(
log: Sender<Option<AuditScope>>,
log_level: Option<u32>,
qs: QueryServer,
idms: Arc<IdmServer>,
) -> Self {
info!("Starting query server v1 worker ...");
QueryServerWriteV1 { log, qs, idms }
QueryServerWriteV1 {
log,
log_level,
qs,
idms,
}
}
pub fn start(
log: Sender<Option<AuditScope>>,
log_level: Option<u32>,
query_server: QueryServer,
idms: Arc<IdmServer>,
) -> actix::Addr<QueryServerWriteV1> {
SyncArbiter::start(1, move || {
QueryServerWriteV1::new(log.clone(), query_server.clone(), idms.clone())
QueryServerWriteV1::new(log.clone(), log_level, query_server.clone(), idms.clone())
})
}
@ -415,8 +427,8 @@ impl Handler<CreateMessage> for QueryServerWriteV1 {
type Result = Result<OperationResponse, OperationError>;
fn handle(&mut self, msg: CreateMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("create", msg.eventid.clone());
let res = lperf_segment!(
let mut audit = AuditScope::new("create", msg.eventid, self.log_level);
let res = lperf_op_segment!(
&mut audit,
"actors::v1_write::handle<CreateMessage>",
|| {
@ -450,7 +462,7 @@ impl Handler<ModifyMessage> for QueryServerWriteV1 {
type Result = Result<OperationResponse, OperationError>;
fn handle(&mut self, msg: ModifyMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("modify", msg.eventid.clone());
let mut audit = AuditScope::new("modify", msg.eventid, self.log_level);
let res = lperf_segment!(
&mut audit,
"actors::v1_write::handle<ModifyMessage>",
@ -483,8 +495,8 @@ impl Handler<DeleteMessage> for QueryServerWriteV1 {
type Result = Result<OperationResponse, OperationError>;
fn handle(&mut self, msg: DeleteMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("delete", msg.eventid.clone());
let res = lperf_segment!(
let mut audit = AuditScope::new("delete", msg.eventid, self.log_level);
let res = lperf_op_segment!(
&mut audit,
"actors::v1_write::handle<DeleteMessage>",
|| {
@ -517,8 +529,8 @@ impl Handler<InternalDeleteMessage> for QueryServerWriteV1 {
type Result = Result<(), OperationError>;
fn handle(&mut self, msg: InternalDeleteMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("delete", msg.eventid.clone());
let res = lperf_segment!(
let mut audit = AuditScope::new("delete", msg.eventid, self.log_level);
let res = lperf_op_segment!(
&mut audit,
"actors::v1_write::handle<InternalDeleteMessage>",
|| {
@ -552,8 +564,8 @@ impl Handler<ReviveRecycledMessage> for QueryServerWriteV1 {
type Result = Result<(), OperationError>;
fn handle(&mut self, msg: ReviveRecycledMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("revive", msg.eventid.clone());
let res = lperf_segment!(
let mut audit = AuditScope::new("revive", msg.eventid, self.log_level);
let res = lperf_op_segment!(
&mut audit,
"actors::v1_write::handle<ReviveRecycledMessage>",
|| {
@ -592,8 +604,12 @@ impl Handler<InternalCredentialSetMessage> for QueryServerWriteV1 {
type Result = Result<SetCredentialResponse, OperationError>;
fn handle(&mut self, msg: InternalCredentialSetMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("internal_credential_set_message", msg.eventid.clone());
let res = lperf_segment!(
let mut audit = AuditScope::new(
"internal_credential_set_message",
msg.eventid,
self.log_level,
);
let res = lperf_op_segment!(
&mut audit,
"actors::v1_write::handle<InternalCredentialSetMessage>",
|| {
@ -661,7 +677,7 @@ impl Handler<InternalCredentialSetMessage> for QueryServerWriteV1 {
idms_prox_write
.generate_account_password(&mut audit, &gpe)
.and_then(|r| idms_prox_write.commit(&mut audit).map(|_| r))
.map(|s| SetCredentialResponse::Token(s))
.map(SetCredentialResponse::Token)
}
SetCredentialRequest::TOTPGenerate(label) => {
let gte = GenerateTOTPEvent::from_parts(
@ -719,8 +735,8 @@ impl Handler<IdmAccountSetPasswordMessage> for QueryServerWriteV1 {
type Result = Result<OperationResponse, OperationError>;
fn handle(&mut self, msg: IdmAccountSetPasswordMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("idm_account_set_password", msg.eventid.clone());
let res = lperf_segment!(
let mut audit = AuditScope::new("idm_account_set_password", msg.eventid, self.log_level);
let res = lperf_op_segment!(
&mut audit,
"actors::v1_write::handle<IdmAccountSetPasswordMessage>",
|| {
@ -760,8 +776,9 @@ impl Handler<InternalRegenerateRadiusMessage> for QueryServerWriteV1 {
msg: InternalRegenerateRadiusMessage,
_: &mut Self::Context,
) -> Self::Result {
let mut audit = AuditScope::new("idm_account_regenerate_radius", msg.eventid.clone());
let res = lperf_segment!(
let mut audit =
AuditScope::new("idm_account_regenerate_radius", msg.eventid, self.log_level);
let res = lperf_op_segment!(
&mut audit,
"actors::v1_write::handle<InternalRegenerateRadiusMessage>",
|| {
@ -809,8 +826,8 @@ impl Handler<PurgeAttributeMessage> for QueryServerWriteV1 {
type Result = Result<(), OperationError>;
fn handle(&mut self, msg: PurgeAttributeMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("purge_attribute", msg.eventid.clone());
let res = lperf_segment!(
let mut audit = AuditScope::new("purge_attribute", msg.eventid, self.log_level);
let res = lperf_op_segment!(
&mut audit,
"actors::v1_write::handle<PurgeAttributeMessage>",
|| {
@ -857,8 +874,8 @@ impl Handler<RemoveAttributeValueMessage> for QueryServerWriteV1 {
type Result = Result<(), OperationError>;
fn handle(&mut self, msg: RemoveAttributeValueMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("remove_attribute_value", msg.eventid.clone());
let res = lperf_segment!(
let mut audit = AuditScope::new("remove_attribute_value", msg.eventid, self.log_level);
let res = lperf_op_segment!(
&mut audit,
"actors::v1_write::handle<RemoveAttributeValueMessage>",
|| {
@ -916,8 +933,8 @@ impl Handler<AppendAttributeMessage> for QueryServerWriteV1 {
filter,
eventid,
} = msg;
let mut audit = AuditScope::new("append_attribute", eventid);
let res = lperf_segment!(
let mut audit = AuditScope::new("append_attribute", eventid, self.log_level);
let res = lperf_op_segment!(
&mut audit,
"actors::v1_write::handle<AppendAttributeMessage>",
|| {
@ -952,8 +969,8 @@ impl Handler<SetAttributeMessage> for QueryServerWriteV1 {
filter,
eventid,
} = msg;
let mut audit = AuditScope::new("set_attribute", eventid);
let res = lperf_segment!(
let mut audit = AuditScope::new("set_attribute", eventid, self.log_level);
let res = lperf_op_segment!(
&mut audit,
"actors::v1_write::handle<SetAttributeMessage>",
|| {
@ -991,8 +1008,8 @@ impl Handler<InternalSshKeyCreateMessage> for QueryServerWriteV1 {
filter,
eventid,
} = msg;
let mut audit = AuditScope::new("internal_sshkey_create", eventid);
let res = lperf_segment!(
let mut audit = AuditScope::new("internal_sshkey_create", eventid, self.log_level);
let res = lperf_op_segment!(
&mut audit,
"actors::v1_write::handle<InternalSshKeyCreateMessage>",
|| {
@ -1024,8 +1041,8 @@ impl Handler<IdmAccountPersonExtendMessage> for QueryServerWriteV1 {
uuid_or_name,
eventid,
} = msg;
let mut audit = AuditScope::new("idm_account_person_extend", eventid);
let res = lperf_segment!(
let mut audit = AuditScope::new("idm_account_person_extend", eventid, self.log_level);
let res = lperf_op_segment!(
&mut audit,
"actors::v1_write::handle<IdmAccountPersonExtendMessage>",
|| {
@ -1065,8 +1082,8 @@ impl Handler<IdmAccountUnixExtendMessage> for QueryServerWriteV1 {
shell,
eventid,
} = msg;
let mut audit = AuditScope::new("idm_account_unix_extend", eventid);
let res = lperf_segment!(
let mut audit = AuditScope::new("idm_account_unix_extend", eventid, self.log_level);
let res = lperf_op_segment!(
&mut audit,
"actors::v1_write::handle<IdmAccountUnixExtendMessage>",
|| {
@ -1110,8 +1127,8 @@ impl Handler<IdmGroupUnixExtendMessage> for QueryServerWriteV1 {
gidnumber,
eventid,
} = msg;
let mut audit = AuditScope::new("idm_group_unix_extend", eventid);
let res = lperf_segment!(
let mut audit = AuditScope::new("idm_group_unix_extend", eventid, self.log_level);
let res = lperf_op_segment!(
&mut audit,
"actors::v1_write::handle<IdmGroupUnixExtendMessage>",
|| {
@ -1148,8 +1165,8 @@ impl Handler<IdmAccountUnixSetCredMessage> for QueryServerWriteV1 {
type Result = Result<(), OperationError>;
fn handle(&mut self, msg: IdmAccountUnixSetCredMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("idm_account_unix_set_cred", msg.eventid.clone());
let res = lperf_segment!(
let mut audit = AuditScope::new("idm_account_unix_set_cred", msg.eventid, self.log_level);
let res = lperf_op_segment!(
&mut audit,
"actors::v1_write::handle<IdmAccountUnixSetCredMessage>",
|| {
@ -1198,8 +1215,8 @@ impl Handler<PurgeTombstoneEvent> for QueryServerWriteV1 {
type Result = ();
fn handle(&mut self, msg: PurgeTombstoneEvent, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("purge tombstones", msg.eventid.clone());
lperf_segment!(
let mut audit = AuditScope::new("purge tombstones", msg.eventid, self.log_level);
lperf_op_segment!(
&mut audit,
"actors::v1_write::handle<PurgeTombstoneEvent>",
|| {
@ -1224,8 +1241,8 @@ impl Handler<PurgeRecycledEvent> for QueryServerWriteV1 {
type Result = ();
fn handle(&mut self, msg: PurgeRecycledEvent, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("purge recycled", msg.eventid.clone());
lperf_segment!(
let mut audit = AuditScope::new("purge recycled", msg.eventid, self.log_level);
lperf_op_segment!(
&mut audit,
"actors::v1_write::handle<PurgeRecycledEvent>",
|| {

View file

@ -7,21 +7,69 @@ use std::time::SystemTime;
use chrono::offset::Utc;
use chrono::DateTime;
use uuid::adapter::HyphenatedRef;
use uuid::Uuid;
use std::str::FromStr;
#[derive(Debug, Serialize, Deserialize)]
#[repr(u32)]
pub enum LogTag {
AdminError,
AdminWarning,
AdminInfo,
RequestError,
Security,
SecurityAccess,
Filter,
FilterError,
FilterWarning,
Trace,
AdminError = 0x0000_0001,
AdminWarning = 0x0000_0002,
AdminInfo = 0x0000_0004,
// 0x0000_0008,
RequestError = 0x0000_0010,
RequestWarning = 0x0000_0020,
RequestInfo = 0x0000_0040,
RequestTrace = 0x0000_0080,
SecurityCritical = 0x0000_0100,
SecurityInfo = 0x0000_0200,
SecurityAccess = 0x0000_0400,
// 0x0000_0800
FilterError = 0x0000_1000,
FilterWarning = 0x0000_2000,
FilterInfo = 0x0000_4000,
FilterTrace = 0x0000_8000,
// 0x0001_0000 -> 0x0800_0000
PerfOp = 0x1000_0000,
PerfCoarse = 0x2000_0000,
PerfTrace = 0x4000_0000,
Trace = 0x8000_0000,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[repr(u32)]
// Composite log-level presets. Each value is a bitmask OR of LogTag bits
// (see LogTag: errors occupy 0x0000_1111, security 0x0000_0f00,
// admin/request warnings 0x0000_0022, perf bits live in the top nibble).
pub enum LogLevel {
    // Errors only (AdminError | RequestError | SecurityCritical | FilterError)
    Quiet = 0x0000_1111,
    // All Error, All Security, Request and Admin Warning, plus PerfOp
    Default = 0x0000_1111 | 0x0000_0f00 | 0x0000_0022 | 0x1000_0000,
    // Default + Filter Plans (FilterInfo)
    Filter = 0x0000_1111 | 0x0000_0f00 | 0x0000_0022 | 0x0000_4000 | 0x1000_0000,
    // All Error, All Warning, All Info, Filter and Request Tracing, plus PerfOp
    Verbose = 0x0000_ffff | 0x1000_0000,
    // Default + PerfCoarse
    PerfBasic = 0x0000_1111 | 0x0000_0f00 | 0x0000_0022 | 0x3000_0000,
    // Default + PerfCoarse + PerfTrace
    PerfFull = 0x0000_1111 | 0x0000_0f00 | 0x0000_0022 | 0x7000_0000,
    // Everything, including the Trace bit (0x8000_0000)
    FullTrace = 0xffff_ffff,
}
impl FromStr for LogLevel {
    type Err = &'static str;

    /// Parse a case-insensitive level name ("quiet", "default", "filter",
    /// "verbose", "perfbasic", "perffull", "fulltrace") into a `LogLevel`.
    /// Any other input yields a static error string.
    fn from_str(l: &str) -> Result<Self, Self::Err> {
        // Normalise once so callers may pass any casing (e.g. "Default").
        let lowered = l.to_lowercase();
        let level = match lowered.as_str() {
            "quiet" => LogLevel::Quiet,
            "default" => LogLevel::Default,
            "filter" => LogLevel::Filter,
            "verbose" => LogLevel::Verbose,
            "perfbasic" => LogLevel::PerfBasic,
            "perffull" => LogLevel::PerfFull,
            "fulltrace" => LogLevel::FullTrace,
            _ => return Err("Could not parse loglevel"),
        };
        Ok(level)
    }
}
impl fmt::Display for LogTag {
@ -31,63 +79,57 @@ impl fmt::Display for LogTag {
LogTag::AdminWarning => write!(f, "admin::warning 🚧"),
LogTag::AdminInfo => write!(f, "admin::info"),
LogTag::RequestError => write!(f, "request::error 🚨"),
LogTag::Security => write!(f, "security 🔐"),
LogTag::RequestWarning => write!(f, "request::warning"),
LogTag::RequestInfo => write!(f, "request::info"),
LogTag::RequestTrace => write!(f, "request::trace"),
LogTag::SecurityCritical => write!(f, "security::critical 🐟"),
LogTag::SecurityInfo => write!(f, "security::info 🔐"),
LogTag::SecurityAccess => write!(f, "security::access 🔓"),
LogTag::Filter => write!(f, "filter"),
LogTag::FilterWarning => write!(f, "filter::warning 🚧"),
LogTag::FilterError => write!(f, "filter::error 🚨"),
LogTag::Trace => write!(f, "trace ⌦"),
LogTag::FilterWarning => write!(f, "filter::warning 🚧"),
LogTag::FilterInfo => write!(f, "filter::info"),
LogTag::FilterTrace => write!(f, "filter::trace"),
LogTag::PerfOp | LogTag::PerfCoarse | LogTag::PerfTrace => write!(f, "perf::trace "),
LogTag::Trace => write!(f, "trace::⌦"),
}
}
}
macro_rules! audit_log {
($audit:expr, $($arg:tt)*) => ({
use std::fmt;
use crate::audit::LogTag;
/*
if cfg!(test) || cfg!(debug_assertions) {
eprintln!($($arg)*)
}
*/
$audit.log_event(
LogTag::AdminError,
fmt::format(
format_args!($($arg)*)
)
)
})
}
macro_rules! lqueue {
($au:expr, $tag:expr, $($arg:tt)*) => ({
use crate::audit::LogTag;
/*
if cfg!(test) {
println!($($arg)*)
}
use std::fmt;
use crate::audit::LogTag;
$au.log_event(
$tag,
fmt::format(
format_args!($($arg)*)
*/
if ($au.level & $tag as u32) == $tag as u32 {
use std::fmt;
$au.log_event(
$tag,
fmt::format(
format_args!($($arg)*)
)
)
)
}
})
}
macro_rules! ltrace {
($au:expr, $($arg:tt)*) => ({
if log_enabled!(log::Level::Debug) || cfg!(test) {
lqueue!($au, LogTag::Trace, $($arg)*)
}
lqueue!($au, LogTag::Trace, $($arg)*)
})
}
macro_rules! lfilter {
($au:expr, $($arg:tt)*) => ({
if log_enabled!(log::Level::Info) || cfg!(test) {
lqueue!($au, LogTag::Filter, $($arg)*)
}
lqueue!($au, LogTag::FilterTrace, $($arg)*)
})
}
// Queue a filter-related informational message (LogTag::FilterInfo) onto the
// audit scope `$au`. Emission is gated by `$au.level` inside `lqueue!`.
macro_rules! lfilter_info {
    ($au:expr, $($arg:tt)*) => ({
        lqueue!($au, LogTag::FilterInfo, $($arg)*)
    })
}
@ -119,9 +161,7 @@ macro_rules! ladmin_warning {
macro_rules! ladmin_info {
($au:expr, $($arg:tt)*) => ({
if log_enabled!(log::Level::Info) || cfg!(test) {
lqueue!($au, LogTag::AdminInfo, $($arg)*)
}
lqueue!($au, LogTag::AdminInfo, $($arg)*)
})
}
@ -133,7 +173,13 @@ macro_rules! lrequest_error {
macro_rules! lsecurity {
($au:expr, $($arg:tt)*) => ({
lqueue!($au, LogTag::Security, $($arg)*)
lqueue!($au, LogTag::SecurityInfo, $($arg)*)
})
}
// Queue a security-critical message (LogTag::SecurityCritical) onto the
// audit scope `$au`. Emission is gated by `$au.level` inside `lqueue!`.
macro_rules! lsecurity_critical {
    ($au:expr, $($arg:tt)*) => ({
        lqueue!($au, LogTag::SecurityCritical, $($arg)*)
    })
}
@ -143,9 +189,30 @@ macro_rules! lsecurity_access {
})
}
// Time the closure `$fun` as a named perf segment `$id` at operation
// granularity (LogTag::PerfOp). Delegates to `lperf_tag_segment!`, which
// only records timing when the PerfOp bit is set in `$au.level`.
macro_rules! lperf_op_segment {
    ($au:expr, $id:expr, $fun:expr) => {{
        use crate::audit::LogTag;
        lperf_tag_segment!($au, $id, LogTag::PerfOp, $fun)
    }};
}
// Time the closure `$fun` as a named perf segment `$id` at fine-grained
// trace granularity (LogTag::PerfTrace). Delegates to `lperf_tag_segment!`,
// which only records timing when the PerfTrace bit is set in `$au.level`.
macro_rules! lperf_trace_segment {
    ($au:expr, $id:expr, $fun:expr) => {{
        use crate::audit::LogTag;
        lperf_tag_segment!($au, $id, LogTag::PerfTrace, $fun)
    }};
}
macro_rules! lperf_segment {
($au:expr, $id:expr, $fun:expr) => {{
if log_enabled!(log::Level::Debug) || cfg!(test) {
use crate::audit::LogTag;
lperf_tag_segment!($au, $id, LogTag::PerfCoarse, $fun)
}};
}
macro_rules! lperf_tag_segment {
($au:expr, $id:expr, $tag:expr, $fun:expr) => {{
if ($au.level & $tag as u32) == $tag as u32 {
use std::time::Instant;
// start timer.
@ -174,12 +241,34 @@ macro_rules! lperf_segment {
}};
}
/*
macro_rules! limmediate_error {
($au:expr, $($arg:tt)*) => ({
use crate::audit::LogTag;
if ($au.level & LogTag::AdminError as u32) == LogTag::AdminError as u32 {
eprintln!($($arg)*)
}
})
}
*/
// Write a warning to stderr *immediately* (bypassing the queued audit log),
// but only when the AdminWarning bit is enabled in `$au.level`. Note this
// uses `eprint!` (no trailing newline) so callers control line breaks.
macro_rules! limmediate_warning {
    ($au:expr, $($arg:tt)*) => ({
        use crate::audit::LogTag;
        if ($au.level & LogTag::AdminWarning as u32) == LogTag::AdminWarning as u32 {
            eprint!($($arg)*)
        }
    })
}
/*
macro_rules! try_audit {
($audit:ident, $result:expr, $logFormat:expr, $errorType:expr) => {
match $result {
Ok(v) => v,
Err(e) => {
audit_log!($audit, $logFormat, e);
ladmin_error!($audit, $logFormat, e);
return Err($errorType);
}
}
@ -188,7 +277,7 @@ macro_rules! try_audit {
match $result {
Ok(v) => v,
Err(e) => {
audit_log!($audit, $logFormat, e);
ladmin_error!($audit, $logFormat, e);
return Err(e);
}
}
@ -197,16 +286,16 @@ macro_rules! try_audit {
match $result {
Ok(v) => v,
Err(e) => {
audit_log!($audit, "error @ {} {} -> {:?}", file!(), line!(), e);
ladmin_error!($audit, "error @ {} {} -> {:?}", file!(), line!(), e);
return Err(e);
}
}
};
}
*/
#[derive(Debug, Serialize, Deserialize)]
struct AuditLog {
time: String,
tag: LogTag,
data: String,
}
@ -228,14 +317,10 @@ impl PerfEvent {
.map(|pe| pe.process_inner(opd))
.collect();
contains.sort_unstable();
let duration = self
.duration
.as_ref()
.expect("corrupted perf event")
.clone();
let duration = self.duration.as_ref().expect("corrupted perf event");
let percent = (duration.as_secs_f64() / opd.as_secs_f64()) * 100.0;
PerfProcessed {
duration,
duration: *duration,
id: self.id.clone(),
percent,
contains,
@ -243,11 +328,7 @@ impl PerfEvent {
}
fn process(&self) -> PerfProcessed {
let duration = self
.duration
.as_ref()
.expect("corrupted perf event")
.clone();
let duration = self.duration.as_ref().expect("corrupted perf event");
let mut contains: Vec<_> = self
.contains
.iter()
@ -255,7 +336,7 @@ impl PerfEvent {
.collect();
contains.sort_unstable();
PerfProcessed {
duration,
duration: *duration,
id: self.id.clone(),
percent: 100.0,
contains,
@ -301,23 +382,27 @@ impl PartialEq for PerfProcessed {
* | |--> another layer
*/
impl PerfProcessed {
fn int_write_fmt(&self, parents: usize, uuid: &HyphenatedRef) {
let mut prefix = String::new();
prefix.push_str("[- perf::trace] ");
fn int_write_fmt(&self, parents: usize, header: &str) {
eprint!("{}", header);
// let mut prefix = header.to_string();
let d = &self.duration;
let df = d.as_secs() as f64 + d.subsec_nanos() as f64 * 1e-9;
if parents > 0 {
for _i in 0..parents {
prefix.push_str("| ");
// prefix.push_str("| ");
eprint!("| ");
}
};
eprintln!("|--> {} {1:.9} {2:.3}%", self.id, df, self.percent);
/*
eprintln!(
"{}|--> {} {2:.9} {3:.3}%",
prefix, self.id, df, self.percent
);
*/
self.contains
.iter()
.for_each(|pe| pe.int_write_fmt(parents + 1, uuid))
.for_each(|pe| pe.int_write_fmt(parents + 1, header))
}
}
@ -329,6 +414,8 @@ pub struct AuditScope {
// vec of start/end points of various parts of the event?
// We probably need some functions for this. Is there a way in rust
// to automatically annotate line numbers of code?
#[serde(skip_serializing)]
pub level: u32,
uuid: Uuid,
events: Vec<AuditLog>,
perf: Vec<PerfEvent>,
@ -345,17 +432,43 @@ impl Message for AuditScope {
}
impl AuditScope {
pub fn new(name: &str, eventid: Uuid) -> Self {
let t_now = SystemTime::now();
let datetime: DateTime<Utc> = t_now.into();
pub fn new(name: &str, eventid: Uuid, level: Option<u32>) -> Self {
let level = if cfg!(test) {
LogLevel::FullTrace as u32
} else {
level.unwrap_or(LogLevel::Default as u32)
};
// Try to reduce re-allocs by pre-allocating the amount we will likely need.
let mut events = if level == LogLevel::FullTrace as u32 {
Vec::with_capacity(512)
} else if (level & LogLevel::PerfFull as u32) == LogLevel::PerfFull as u32 {
Vec::with_capacity(256)
} else if (level & LogLevel::PerfBasic as u32) == LogLevel::PerfBasic as u32
|| (level & LogLevel::Verbose as u32) == LogLevel::Verbose as u32
{
Vec::with_capacity(64)
} else if level == LogLevel::Quiet as u32 {
Vec::with_capacity(0)
} else {
// (level & LogTag::Filter as u32) == LogTag::Filter as u32
// (level & LogTag::Default as u32) == LogTag::Default as u32
Vec::with_capacity(16)
};
if (level & LogTag::AdminInfo as u32) == LogTag::AdminInfo as u32 {
let t_now = SystemTime::now();
let datetime: DateTime<Utc> = t_now.into();
events.push(AuditLog {
tag: LogTag::AdminInfo,
data: format!("{} {}", name, datetime.to_rfc3339()),
})
}
AuditScope {
level,
uuid: eventid,
events: vec![AuditLog {
time: datetime.to_rfc3339(),
tag: LogTag::AdminInfo,
data: format!("start {}", name),
}],
events,
perf: vec![],
active_perf: None,
}
@ -363,62 +476,34 @@ impl AuditScope {
pub fn write_log(self) {
let uuid_ref = self.uuid.to_hyphenated_ref();
if log_enabled!(log::Level::Warn) {
eprintln!("[- event::start] {}", uuid_ref);
}
self.events.iter().for_each(|e| match e.tag {
LogTag::AdminError => eprintln!("[{} {}] {} {}", e.time, e.tag, uuid_ref, e.data),
LogTag::RequestError | LogTag::FilterError => {
if log_enabled!(log::Level::Warn) {
eprintln!("[{} {}] {}", e.time, e.tag, e.data)
}
}
LogTag::AdminWarning
| LogTag::Security
| LogTag::SecurityAccess
| LogTag::FilterWarning => {
if log_enabled!(log::Level::Warn) {
eprintln!("[{} {}] {}", e.time, e.tag, e.data)
}
}
LogTag::AdminInfo | LogTag::Filter => {
if log_enabled!(log::Level::Info) {
eprintln!("[{} {}] {}", e.time, e.tag, e.data)
}
}
LogTag::Trace => {
if log_enabled!(log::Level::Debug) {
eprintln!("[{} {}] {}", e.time, e.tag, e.data)
}
}
});
self.events
.iter()
.for_each(|e| eprintln!("[{} {}] {}", uuid_ref, e.tag, e.data));
if log_enabled!(log::Level::Warn) {
eprintln!("[- event::end] {}", uuid_ref);
}
// First, we pre-process all the perf events to order them
let mut proc_perf: Vec<_> = self.perf.iter().map(|pe| pe.process()).collect();
// We still sort them by duration.
proc_perf.sort_unstable();
let header = format!("[{} perf::trace] ", uuid_ref);
// Now write the perf events
proc_perf
.iter()
.for_each(|pe| pe.int_write_fmt(0, &uuid_ref));
.for_each(|pe| pe.int_write_fmt(0, header.as_str()));
if log_enabled!(log::Level::Debug) {
eprintln!("[- perf::trace] end: {}", uuid_ref);
eprintln!("[{} perf::trace] -", uuid_ref);
}
}
pub fn log_event(&mut self, tag: LogTag, data: String) {
let t_now = SystemTime::now();
let datetime: DateTime<Utc> = t_now.into();
// let t_now = SystemTime::now();
// let datetime: DateTime<Utc> = t_now.into();
self.events.push(AuditLog {
time: datetime.to_rfc3339(),
// time: datetime.to_rfc3339(),
tag,
data: data,
data,
})
}
@ -482,7 +567,7 @@ mod tests {
// Create and remove. Perhaps add some core details?
#[test]
fn test_audit_simple() {
let au = AuditScope::new("au", uuid::Uuid::new_v4());
let au = AuditScope::new("au", uuid::Uuid::new_v4(), None);
let d = serde_json::to_string_pretty(&au).expect("Json serialise failure");
debug!("{}", d);
}

View file

@ -33,7 +33,7 @@ enum NameCacheKey {
enum NameCacheValue {
U(Uuid),
R(String),
S(Value),
S(Box<Value>),
}
#[derive(Debug, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)]
@ -89,7 +89,7 @@ macro_rules! get_identry {
$au:expr,
$idl:expr
) => {{
lperf_segment!($au, "be::idl_arc_sqlite::get_identry", || {
lperf_trace_segment!($au, "be::idl_arc_sqlite::get_identry", || {
match $idl {
IDL::Partial(idli) | IDL::PartialThreshold(idli) | IDL::Indexed(idli) => {
let mut result: Vec<Entry<_, _>> = Vec::new();
@ -105,7 +105,6 @@ macro_rules! get_identry {
});
if !nidl.is_empty() {
ladmin_warning!($au, "idl_arc_sqlite cache miss detected - if this occurs frequently you SHOULD adjust your cache tuning.");
// Now, get anything from nidl that is needed.
let mut db_result = $self.db.get_identry($au, &IDL::Partial(nidl))?;
@ -158,8 +157,8 @@ macro_rules! get_idl {
$itype:expr,
$idx_key:expr
) => {{
lperf_segment!($audit, "be::idl_arc_sqlite::get_idl", || {
// TODO: Find a way to implement borrow for this properly
lperf_trace_segment!($audit, "be::idl_arc_sqlite::get_idl", || {
// TODO #259: Find a way to implement borrow for this properly
// First attempt to get from this cache.
let cache_key = IdlCacheKey {
a: $attr.to_string(),
@ -194,7 +193,7 @@ macro_rules! name2uuid {
$audit:expr,
$name:expr
) => {{
lperf_segment!($audit, "be::idl_arc_sqlite::name2uuid", || {
lperf_trace_segment!($audit, "be::idl_arc_sqlite::name2uuid", || {
let cache_key = NameCacheKey::Name2Uuid($name.to_string());
let cache_r = $self.name_cache.get(&cache_key);
if let Some(NameCacheValue::U(uuid)) = cache_r {
@ -219,19 +218,19 @@ macro_rules! uuid2spn {
$audit:expr,
$uuid:expr
) => {{
lperf_segment!($audit, "be::idl_arc_sqlite::name2uuid", || {
lperf_trace_segment!($audit, "be::idl_arc_sqlite::name2uuid", || {
let cache_key = NameCacheKey::Uuid2Spn(*$uuid);
let cache_r = $self.name_cache.get(&cache_key);
if let Some(NameCacheValue::S(ref spn)) = cache_r {
ltrace!($audit, "Got cached spn for uuid2spn");
return Ok(Some(spn.clone()));
return Ok(Some(spn.as_ref().clone()));
}
let db_r = $self.db.uuid2spn($audit, $uuid)?;
if let Some(ref data) = db_r {
$self
.name_cache
.insert(cache_key, NameCacheValue::S(data.clone()))
.insert(cache_key, NameCacheValue::S(Box::new(data.clone())))
}
Ok(db_r)
})
@ -244,7 +243,7 @@ macro_rules! uuid2rdn {
$audit:expr,
$uuid:expr
) => {{
lperf_segment!($audit, "be::idl_arc_sqlite::name2uuid", || {
lperf_trace_segment!($audit, "be::idl_arc_sqlite::name2uuid", || {
let cache_key = NameCacheKey::Uuid2Rdn(*$uuid);
let cache_r = $self.name_cache.get(&cache_key);
if let Some(NameCacheValue::R(ref rdn)) = cache_r {
@ -464,7 +463,7 @@ impl<'a> IdlArcSqliteTransaction for IdlArcSqliteWriteTransaction<'a> {
impl<'a> IdlArcSqliteWriteTransaction<'a> {
pub fn commit(self, audit: &mut AuditScope) -> Result<(), OperationError> {
lperf_segment!(audit, "be::idl_arc_sqlite::commit", || {
lperf_trace_segment!(audit, "be::idl_arc_sqlite::commit", || {
let IdlArcSqliteWriteTransaction {
db,
entry_cache,
@ -473,12 +472,12 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> {
op_ts_max,
} = self;
// Undo the caches in the reverse order.
db.commit(audit).and_then(|r| {
db.commit(audit).and_then(|()| {
op_ts_max.commit();
name_cache.commit();
idl_cache.commit();
entry_cache.commit();
Ok(r)
Ok(())
})
})
}
@ -497,7 +496,7 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> {
where
I: Iterator<Item = &'b Entry<EntrySealed, EntryCommitted>>,
{
lperf_segment!(au, "be::idl_arc_sqlite::write_identries", || {
lperf_trace_segment!(au, "be::idl_arc_sqlite::write_identries", || {
// Danger! We know that the entry cache is valid to manipulate here
// but rust doesn't know that so it prevents the mut/immut borrow.
let e_cache = unsafe { &mut *(&mut self.entry_cache as *mut ArcWriteTxn<_, _>) };
@ -527,7 +526,7 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> {
where
I: Iterator<Item = u64>,
{
lperf_segment!(au, "be::idl_arc_sqlite::delete_identry", || {
lperf_trace_segment!(au, "be::idl_arc_sqlite::delete_identry", || {
// Danger! We know that the entry cache is valid to manipulate here
// but rust doesn't know that so it prevents the mut/immut borrow.
let e_cache = unsafe { &mut *(&mut self.entry_cache as *mut ArcWriteTxn<_, _>) };
@ -547,7 +546,7 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> {
idx_key: &str,
idl: &IDLBitRange,
) -> Result<(), OperationError> {
lperf_segment!(audit, "be::idl_arc_sqlite::write_idl", || {
lperf_trace_segment!(audit, "be::idl_arc_sqlite::write_idl", || {
let cache_key = IdlCacheKey {
a: attr.to_string(),
i: itype.clone(),
@ -556,7 +555,7 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> {
// On idl == 0 the db will remove this, and synthesise an empty IDL on a miss
// but we can cache this as a new empty IDL instead, so that we can avoid the
// db lookup on this idl.
if idl.len() == 0 {
if idl.is_empty() {
self.idl_cache
.insert(cache_key, Box::new(IDLBitRange::new()));
} else {
@ -576,7 +575,7 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> {
uuid: &Uuid,
add: BTreeSet<String>,
) -> Result<(), OperationError> {
lperf_segment!(audit, "be::idl_arc_sqlite::write_name2uuid_add", || {
lperf_trace_segment!(audit, "be::idl_arc_sqlite::write_name2uuid_add", || {
self.db
.write_name2uuid_add(audit, uuid, &add)
.and_then(|_| {
@ -595,7 +594,7 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> {
audit: &mut AuditScope,
rem: BTreeSet<String>,
) -> Result<(), OperationError> {
lperf_segment!(audit, "be::idl_arc_sqlite::write_name2uuid_add", || {
lperf_trace_segment!(audit, "be::idl_arc_sqlite::write_name2uuid_add", || {
self.db.write_name2uuid_rem(audit, &rem).and_then(|_| {
rem.into_iter().for_each(|k| {
let cache_key = NameCacheKey::Name2Uuid(k);
@ -616,13 +615,15 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> {
uuid: &Uuid,
k: Option<Value>,
) -> Result<(), OperationError> {
lperf_segment!(audit, "be::idl_arc_sqlite::write_uuid2spn", || {
lperf_trace_segment!(audit, "be::idl_arc_sqlite::write_uuid2spn", || {
self.db
.write_uuid2spn(audit, uuid, k.as_ref())
.and_then(|_| {
let cache_key = NameCacheKey::Uuid2Spn(uuid.clone());
let cache_key = NameCacheKey::Uuid2Spn(*uuid);
match k {
Some(v) => self.name_cache.insert(cache_key, NameCacheValue::S(v)),
Some(v) => self
.name_cache
.insert(cache_key, NameCacheValue::S(Box::new(v))),
None => self.name_cache.remove(cache_key),
}
Ok(())
@ -640,11 +641,11 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> {
uuid: &Uuid,
k: Option<String>,
) -> Result<(), OperationError> {
lperf_segment!(audit, "be::idl_arc_sqlite::write_uuid2rdn", || {
lperf_trace_segment!(audit, "be::idl_arc_sqlite::write_uuid2rdn", || {
self.db
.write_uuid2rdn(audit, uuid, k.as_ref())
.and_then(|_| {
let cache_key = NameCacheKey::Uuid2Rdn(uuid.clone());
let cache_key = NameCacheKey::Uuid2Rdn(*uuid);
match k {
Some(s) => self.name_cache.insert(cache_key, NameCacheValue::R(s)),
None => self.name_cache.remove(cache_key),
@ -670,16 +671,16 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> {
}
pub unsafe fn purge_idxs(&mut self, audit: &mut AuditScope) -> Result<(), OperationError> {
self.db.purge_idxs(audit).and_then(|r| {
self.db.purge_idxs(audit).and_then(|()| {
self.idl_cache.clear();
Ok(r)
Ok(())
})
}
pub unsafe fn purge_id2entry(&mut self, audit: &mut AuditScope) -> Result<(), OperationError> {
self.db.purge_id2entry(audit).and_then(|r| {
self.db.purge_id2entry(audit).and_then(|()| {
self.entry_cache.clear();
Ok(r)
Ok(())
})
}
@ -692,13 +693,13 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> {
}
pub fn set_db_ts_max(&mut self, ts: &Duration) -> Result<(), OperationError> {
*self.op_ts_max = Some(ts.clone());
*self.op_ts_max = Some(*ts);
self.db.set_db_ts_max(ts)
}
pub fn get_db_ts_max(&self) -> Result<Option<Duration>, OperationError> {
match *self.op_ts_max {
Some(ts) => Ok(Some(ts.clone())),
Some(ts) => Ok(Some(ts)),
None => self.db.get_db_ts_max(),
}
}

View file

@ -45,7 +45,7 @@ impl TryFrom<IdRawEntry> for IdSqliteEntry {
type Error = OperationError;
fn try_from(value: IdRawEntry) -> Result<Self, Self::Error> {
if value.id <= 0 {
if value.id == 0 {
return Err(OperationError::InvalidEntryID);
}
Ok(IdSqliteEntry {
@ -81,7 +81,7 @@ pub trait IdlSqliteTransaction {
au: &mut AuditScope,
idl: &IDL,
) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> {
lperf_segment!(au, "be::idl_sqlite::get_identry", || {
lperf_trace_segment!(au, "be::idl_sqlite::get_identry", || {
self.get_identry_raw(au, idl)?
.into_iter()
.map(|ide| ide.into_entry(au))
@ -97,21 +97,24 @@ pub trait IdlSqliteTransaction {
// is the idl allids?
match idl {
IDL::ALLIDS => {
let mut stmt = try_audit!(
au,
self.get_conn().prepare("SELECT id, data FROM id2entry"),
"SQLite Error {:?}",
OperationError::SQLiteError
);
let id2entry_iter = try_audit!(
au,
stmt.query_map(NO_PARAMS, |row| Ok(IdSqliteEntry {
id: row.get(0)?,
data: row.get(1)?,
})),
"SQLite Error {:?}",
OperationError::SQLiteError
);
let mut stmt = self
.get_conn()
.prepare("SELECT id, data FROM id2entry")
.map_err(|e| {
ladmin_error!(au, "SQLite Error {:?}", e);
OperationError::SQLiteError
})?;
let id2entry_iter = stmt
.query_map(NO_PARAMS, |row| {
Ok(IdSqliteEntry {
id: row.get(0)?,
data: row.get(1)?,
})
})
.map_err(|e| {
ladmin_error!(au, "SQLite Error {:?}", e);
OperationError::SQLiteError
})?;
id2entry_iter
.map(|v| {
v.map_err(|e| {
@ -126,19 +129,19 @@ pub trait IdlSqliteTransaction {
.collect()
}
IDL::Partial(idli) | IDL::PartialThreshold(idli) | IDL::Indexed(idli) => {
let mut stmt = try_audit!(
au,
self.get_conn()
.prepare("SELECT id, data FROM id2entry WHERE id = :idl"),
"SQLite Error {:?}",
OperationError::SQLiteError
);
let mut stmt = self
.get_conn()
.prepare("SELECT id, data FROM id2entry WHERE id = :idl")
.map_err(|e| {
ladmin_error!(au, "SQLite Error {:?}", e);
OperationError::SQLiteError
})?;
// TODO: I have no idea how to make this an iterator chain ... so what
// TODO #258: Can this actually just load in a single select?
// TODO #258: I have no idea how to make this an iterator chain ... so what
// I have now is probably really bad :(
let mut results = Vec::new();
// TODO: Can this actually just load in a single select?
/*
let decompressed: Result<Vec<i64>, _> = idli.into_iter()
.map(|u| i64::try_from(u).map_err(|_| OperationError::InvalidEntryID))
@ -186,19 +189,19 @@ pub trait IdlSqliteTransaction {
itype: &IndexType,
) -> Result<bool, OperationError> {
let tname = format!("idx_{}_{}", itype.as_idx_str(), attr);
let mut stmt = try_audit!(
audit,
self.get_conn()
.prepare("SELECT COUNT(name) from sqlite_master where name = :tname"),
"SQLite Error {:?}",
OperationError::SQLiteError
);
let i: Option<i64> = try_audit!(
audit,
stmt.query_row_named(&[(":tname", &tname)], |row| row.get(0)),
"SQLite Error {:?}",
OperationError::SQLiteError
);
let mut stmt = self
.get_conn()
.prepare("SELECT COUNT(name) from sqlite_master where name = :tname")
.map_err(|e| {
ladmin_error!(audit, "SQLite Error {:?}", e);
OperationError::SQLiteError
})?;
let i: Option<i64> = stmt
.query_row_named(&[(":tname", &tname)], |row| row.get(0))
.map_err(|e| {
ladmin_error!(audit, "SQLite Error {:?}", e);
OperationError::SQLiteError
})?;
if i.unwrap_or(0) == 0 {
Ok(false)
@ -214,7 +217,7 @@ pub trait IdlSqliteTransaction {
itype: &IndexType,
idx_key: &str,
) -> Result<Option<IDLBitRange>, OperationError> {
lperf_segment!(audit, "be::idl_sqlite::get_idl", || {
lperf_trace_segment!(audit, "be::idl_sqlite::get_idl", || {
if !(self.exists_idx(audit, attr, itype)?) {
lfilter_error!(audit, "Index {:?} {:?} not found", itype, attr);
return Ok(None);
@ -226,20 +229,18 @@ pub trait IdlSqliteTransaction {
itype.as_idx_str(),
attr
);
let mut stmt = try_audit!(
audit,
self.get_conn().prepare(query.as_str()),
"SQLite Error {:?}",
let mut stmt = self.get_conn().prepare(query.as_str()).map_err(|e| {
ladmin_error!(audit, "SQLite Error {:?}", e);
OperationError::SQLiteError
);
let idl_raw: Option<Vec<u8>> = try_audit!(
audit,
stmt.query_row_named(&[(":idx_key", &idx_key)], |row| row.get(0))
// We don't mind if it doesn't exist
.optional(),
"SQLite Error {:?}",
OperationError::SQLiteError
);
})?;
let idl_raw: Option<Vec<u8>> = stmt
.query_row_named(&[(":idx_key", &idx_key)], |row| row.get(0))
// We don't mind if it doesn't exist
.optional()
.map_err(|e| {
ladmin_error!(audit, "SQLite Error {:?}", e);
OperationError::SQLiteError
})?;
let idl = match idl_raw {
Some(d) => serde_cbor::from_slice(d.as_slice())
@ -259,23 +260,23 @@ pub trait IdlSqliteTransaction {
audit: &mut AuditScope,
name: &str,
) -> Result<Option<Uuid>, OperationError> {
lperf_segment!(audit, "be::idl_sqlite::name2uuid", || {
lperf_trace_segment!(audit, "be::idl_sqlite::name2uuid", || {
// The table exists - lets now get the actual index itself.
let mut stmt = try_audit!(
audit,
self.get_conn()
.prepare("SELECT uuid FROM idx_name2uuid WHERE name = :name",),
"SQLite Error {:?}",
OperationError::SQLiteError
);
let uuid_raw: Option<String> = try_audit!(
audit,
stmt.query_row_named(&[(":name", &name)], |row| row.get(0))
// We don't mind if it doesn't exist
.optional(),
"SQLite Error {:?}",
OperationError::SQLiteError
);
let mut stmt = self
.get_conn()
.prepare("SELECT uuid FROM idx_name2uuid WHERE name = :name")
.map_err(|e| {
ladmin_error!(audit, "SQLite Error {:?}", e);
OperationError::SQLiteError
})?;
let uuid_raw: Option<String> = stmt
.query_row_named(&[(":name", &name)], |row| row.get(0))
// We don't mind if it doesn't exist
.optional()
.map_err(|e| {
ladmin_error!(audit, "SQLite Error {:?}", e);
OperationError::SQLiteError
})?;
let uuid = uuid_raw.as_ref().and_then(|u| Uuid::parse_str(u).ok());
ltrace!(audit, "Got uuid for index name {} -> {:?}", name, uuid);
@ -289,24 +290,24 @@ pub trait IdlSqliteTransaction {
audit: &mut AuditScope,
uuid: &Uuid,
) -> Result<Option<Value>, OperationError> {
lperf_segment!(audit, "be::idl_sqlite::uuid2spn", || {
lperf_trace_segment!(audit, "be::idl_sqlite::uuid2spn", || {
let uuids = uuid.to_hyphenated_ref().to_string();
// The table exists - lets now get the actual index itself.
let mut stmt = try_audit!(
audit,
self.get_conn()
.prepare("SELECT spn FROM idx_uuid2spn WHERE uuid = :uuid",),
"SQLite Error {:?}",
OperationError::SQLiteError
);
let spn_raw: Option<Vec<u8>> = try_audit!(
audit,
stmt.query_row_named(&[(":uuid", &uuids)], |row| row.get(0))
// We don't mind if it doesn't exist
.optional(),
"SQLite Error {:?}",
OperationError::SQLiteError
);
let mut stmt = self
.get_conn()
.prepare("SELECT spn FROM idx_uuid2spn WHERE uuid = :uuid")
.map_err(|e| {
ladmin_error!(audit, "SQLite Error {:?}", e);
OperationError::SQLiteError
})?;
let spn_raw: Option<Vec<u8>> = stmt
.query_row_named(&[(":uuid", &uuids)], |row| row.get(0))
// We don't mind if it doesn't exist
.optional()
.map_err(|e| {
ladmin_error!(audit, "SQLite Error {:?}", e);
OperationError::SQLiteError
})?;
let spn: Option<Value> = match spn_raw {
Some(d) => {
@ -330,24 +331,24 @@ pub trait IdlSqliteTransaction {
audit: &mut AuditScope,
uuid: &Uuid,
) -> Result<Option<String>, OperationError> {
lperf_segment!(audit, "be::idl_sqlite::uuid2rdn", || {
lperf_trace_segment!(audit, "be::idl_sqlite::uuid2rdn", || {
let uuids = uuid.to_hyphenated_ref().to_string();
// The table exists - lets now get the actual index itself.
let mut stmt = try_audit!(
audit,
self.get_conn()
.prepare("SELECT rdn FROM idx_uuid2rdn WHERE uuid = :uuid",),
"SQLite Error {:?}",
OperationError::SQLiteError
);
let rdn: Option<String> = try_audit!(
audit,
stmt.query_row_named(&[(":uuid", &uuids)], |row| row.get(0))
// We don't mind if it doesn't exist
.optional(),
"SQLite Error {:?}",
OperationError::SQLiteError
);
let mut stmt = self
.get_conn()
.prepare("SELECT rdn FROM idx_uuid2rdn WHERE uuid = :uuid")
.map_err(|e| {
ladmin_error!(audit, "SQLite Error {:?}", e);
OperationError::SQLiteError
})?;
let rdn: Option<String> = stmt
.query_row_named(&[(":uuid", &uuids)], |row| row.get(0))
// We don't mind if it doesn't exist
.optional()
.map_err(|e| {
ladmin_error!(audit, "SQLite Error {:?}", e);
OperationError::SQLiteError
})?;
ltrace!(audit, "Got rdn for uuid {:?} -> {:?}", uuid, rdn);
@ -407,6 +408,7 @@ pub trait IdlSqliteTransaction {
})
}
// This allow is critical as it resolves a life time issue in stmt.
#[allow(clippy::let_and_return)]
fn verify(&self) -> Vec<Result<(), ConsistencyError>> {
let mut stmt = match self.get_conn().prepare("PRAGMA integrity_check;") {
@ -505,7 +507,7 @@ impl IdlSqliteWriteTransaction {
}
pub fn commit(mut self, audit: &mut AuditScope) -> Result<(), OperationError> {
lperf_segment!(audit, "be::idl_sqlite::commit", || {
lperf_trace_segment!(audit, "be::idl_sqlite::commit", || {
// ltrace!(audit, "Commiting BE WR txn");
assert!(!self.committed);
self.committed = true;
@ -553,7 +555,7 @@ impl IdlSqliteWriteTransaction {
where
I: Iterator<Item = &'b Entry<EntrySealed, EntryCommitted>>,
{
lperf_segment!(au, "be::idl_sqlite::write_identries", || {
lperf_trace_segment!(au, "be::idl_sqlite::write_identries", || {
let raw_entries: Result<Vec<_>, _> = entries
.map(|e| {
let dbe = e.to_dbentry();
@ -578,32 +580,32 @@ impl IdlSqliteWriteTransaction {
where
I: Iterator<Item = IdRawEntry>,
{
let mut stmt = try_audit!(
au,
self.conn
.prepare("INSERT OR REPLACE INTO id2entry (id, data) VALUES(:id, :data)"),
"RusqliteError: {:?}",
OperationError::SQLiteError
);
let mut stmt = self
.conn
.prepare("INSERT OR REPLACE INTO id2entry (id, data) VALUES(:id, :data)")
.map_err(|e| {
ladmin_error!(au, "SQLite Error {:?}", e);
OperationError::SQLiteError
})?;
try_audit!(
au,
entries.try_for_each(|e| {
let ser_ent = IdSqliteEntry::try_from(e)?;
entries.try_for_each(|e| {
IdSqliteEntry::try_from(e).and_then(|ser_ent| {
stmt.execute_named(&[(":id", &ser_ent.id), (":data", &ser_ent.data)])
// remove the updated usize
.map(|_| ())
.map_err(|_| OperationError::SQLiteError)
.map_err(|e| {
ladmin_error!(au, "SQLite Error {:?}", e);
OperationError::SQLiteError
})
})
);
Ok(())
})
}
pub fn delete_identry<I>(&self, au: &mut AuditScope, mut idl: I) -> Result<(), OperationError>
where
I: Iterator<Item = u64>,
{
lperf_segment!(au, "be::idl_sqlite::delete_identry", || {
lperf_trace_segment!(au, "be::idl_sqlite::delete_identry", || {
let mut stmt = self
.conn
.prepare("DELETE FROM id2entry WHERE id = :id")
@ -642,8 +644,8 @@ impl IdlSqliteWriteTransaction {
idx_key: &str,
idl: &IDLBitRange,
) -> Result<(), OperationError> {
lperf_segment!(audit, "be::idl_sqlite::write_idl", || {
if idl.len() == 0 {
lperf_trace_segment!(audit, "be::idl_sqlite::write_idl", || {
if idl.is_empty() {
ltrace!(audit, "purging idl -> {:?}", idl);
// delete it
// Delete this idx_key from the table.
@ -691,16 +693,16 @@ impl IdlSqliteWriteTransaction {
}
pub fn create_name2uuid(&self, audit: &mut AuditScope) -> Result<(), OperationError> {
try_audit!(
audit,
self.conn.execute(
self.conn
.execute(
"CREATE TABLE IF NOT EXISTS idx_name2uuid (name TEXT PRIMARY KEY, uuid TEXT)",
NO_PARAMS
),
"sqlite error {:?}",
OperationError::SQLiteError
);
Ok(())
NO_PARAMS,
)
.map(|_| ())
.map_err(|e| {
ladmin_error!(audit, "SQLite Error {:?}", e);
OperationError::SQLiteError
})
}
pub fn write_name2uuid_add(
@ -745,16 +747,16 @@ impl IdlSqliteWriteTransaction {
}
pub fn create_uuid2spn(&self, audit: &mut AuditScope) -> Result<(), OperationError> {
try_audit!(
audit,
self.conn.execute(
self.conn
.execute(
"CREATE TABLE IF NOT EXISTS idx_uuid2spn (uuid TEXT PRIMARY KEY, spn BLOB)",
NO_PARAMS
),
"sqlite error {:?}",
OperationError::SQLiteError
);
Ok(())
NO_PARAMS,
)
.map(|_| ())
.map_err(|e| {
ladmin_error!(audit, "SQLite Error {:?}", e);
OperationError::SQLiteError
})
}
pub fn write_uuid2spn(
@ -795,16 +797,16 @@ impl IdlSqliteWriteTransaction {
}
pub fn create_uuid2rdn(&self, audit: &mut AuditScope) -> Result<(), OperationError> {
try_audit!(
audit,
self.conn.execute(
self.conn
.execute(
"CREATE TABLE IF NOT EXISTS idx_uuid2rdn (uuid TEXT PRIMARY KEY, rdn TEXT)",
NO_PARAMS
),
"sqlite error {:?}",
OperationError::SQLiteError
);
Ok(())
NO_PARAMS,
)
.map(|_| ())
.map_err(|e| {
ladmin_error!(audit, "SQLite Error {:?}", e);
OperationError::SQLiteError
})
}
pub fn write_uuid2rdn(
@ -857,29 +859,27 @@ impl IdlSqliteWriteTransaction {
);
ltrace!(audit, "Creating index -> {}", idx_stmt);
try_audit!(
audit,
self.conn.execute(idx_stmt.as_str(), NO_PARAMS),
"sqlite error {:?}",
OperationError::SQLiteError
);
Ok(())
self.conn
.execute(idx_stmt.as_str(), NO_PARAMS)
.map(|_| ())
.map_err(|e| {
ladmin_error!(audit, "SQLite Error {:?}", e);
OperationError::SQLiteError
})
}
pub fn list_idxs(&self, audit: &mut AuditScope) -> Result<Vec<String>, OperationError> {
let mut stmt = try_audit!(
audit,
self.get_conn()
.prepare("SELECT name from sqlite_master where type='table' and name LIKE 'idx_%'"),
"SQLite Error {:?}",
let mut stmt = self
.get_conn()
.prepare("SELECT name from sqlite_master where type='table' and name LIKE 'idx_%'")
.map_err(|e| {
ladmin_error!(audit, "SQLite Error {:?}", e);
OperationError::SQLiteError
})?;
let idx_table_iter = stmt.query_map(NO_PARAMS, |row| row.get(0)).map_err(|e| {
ladmin_error!(audit, "SQLite Error {:?}", e);
OperationError::SQLiteError
);
let idx_table_iter = try_audit!(
audit,
stmt.query_map(NO_PARAMS, |row| row.get(0)),
"SQLite Error {:?}",
OperationError::SQLiteError
);
})?;
let r: Result<_, _> = idx_table_iter
.map(|v| {
@ -909,14 +909,14 @@ impl IdlSqliteWriteTransaction {
}
pub unsafe fn purge_id2entry(&self, audit: &mut AuditScope) -> Result<(), OperationError> {
try_audit!(
audit,
self.conn.execute("DELETE FROM id2entry", NO_PARAMS),
"rustqlite error {:?}",
OperationError::SQLiteError
);
ltrace!(audit, "purge id2entry ...");
Ok(())
self.conn
.execute("DELETE FROM id2entry", NO_PARAMS)
.map(|_| ())
.map_err(|e| {
ladmin_error!(audit, "sqlite error {:?}", e);
OperationError::SQLiteError
})
}
pub fn write_db_s_uuid(&self, nsid: Uuid) -> Result<(), OperationError> {
@ -929,8 +929,7 @@ impl IdlSqliteWriteTransaction {
)
.map(|_| ())
.map_err(|e| {
error!("rusqlite error {:?}", e);
eprintln!("CRITICAL: rusqlite error {:?}", e);
OperationError::SQLiteError
})
}
@ -945,8 +944,7 @@ impl IdlSqliteWriteTransaction {
)
.map(|_| ())
.map_err(|e| {
error!("rusqlite error {:?}", e);
eprintln!("CRITICAL: rusqlite error {:?}", e);
OperationError::SQLiteError
})
}
@ -961,8 +959,7 @@ impl IdlSqliteWriteTransaction {
)
.map(|_| ())
.map_err(|e| {
error!("rusqlite error {:?}", e);
eprintln!("CRITICAL: rusqlite error {:?}", e);
OperationError::SQLiteError
})
}
@ -1024,7 +1021,7 @@ impl IdlSqliteWriteTransaction {
pub(crate) fn set_db_index_version(&self, v: i64) -> Result<(), OperationError> {
self.set_db_version_key(DBV_INDEXV, v).map_err(|e| {
error!("sqlite error {:?}", e);
eprintln!("CRITICAL: rusqlite error {:?}", e);
OperationError::SQLiteError
})
}
@ -1034,18 +1031,13 @@ impl IdlSqliteWriteTransaction {
//
// We have to use stmt + prepare because execute can't handle
// the "wal" row on result when this works!
let mut wal_stmt = try_audit!(
audit,
self.conn.prepare("PRAGMA journal_mode=WAL;"),
"sqlite error {:?}",
OperationError::SQLiteError
);
try_audit!(
audit,
wal_stmt.query(NO_PARAMS),
"sqlite error {:?}",
OperationError::SQLiteError
);
self.conn
.prepare("PRAGMA journal_mode=WAL;")
.and_then(|mut wal_stmt| wal_stmt.query(NO_PARAMS).map(|_| ()))
.map_err(|e| {
ladmin_error!(audit, "sqlite error {:?}", e);
OperationError::SQLiteError
})?;
// This stores versions of components. For example:
// ----------------------
@ -1059,19 +1051,19 @@ impl IdlSqliteWriteTransaction {
// rolled back individually, by upgraded in isolation, and more
//
// NEVER CHANGE THIS DEFINITION.
try_audit!(
audit,
self.conn.execute(
self.conn
.execute(
"CREATE TABLE IF NOT EXISTS db_version (
id TEXT PRIMARY KEY,
version INTEGER
)
",
NO_PARAMS,
),
"sqlite error {:?}",
OperationError::SQLiteError
);
)
.map_err(|e| {
ladmin_error!(audit, "sqlite error {:?}", e);
OperationError::SQLiteError
})?;
// If the table is empty, populate the versions as 0.
let mut dbv_id2entry = self.get_db_version_key(DBV_ID2ENTRY);
@ -1080,32 +1072,30 @@ impl IdlSqliteWriteTransaction {
// Check db_version here.
// * if 0 -> create v1.
if dbv_id2entry == 0 {
try_audit!(
audit,
self.conn.execute(
self.conn
.execute(
"CREATE TABLE IF NOT EXISTS id2entry (
id INTEGER PRIMARY KEY ASC,
data BLOB NOT NULL
)
",
NO_PARAMS,
),
"sqlite error {:?}",
OperationError::SQLiteError
);
try_audit!(
audit,
self.conn.execute(
"CREATE TABLE IF NOT EXISTS db_sid (
)
.and_then(|_| {
self.conn.execute(
"CREATE TABLE IF NOT EXISTS db_sid (
id INTEGER PRIMARY KEY ASC,
data BLOB NOT NULL
)
",
NO_PARAMS,
),
"sqlite error {:?}",
OperationError::SQLiteError
);
NO_PARAMS,
)
})
.map_err(|e| {
ladmin_error!(audit, "sqlite error {:?}", e);
OperationError::SQLiteError
})?;
dbv_id2entry = 1;
ladmin_info!(
audit,
@ -1115,37 +1105,38 @@ impl IdlSqliteWriteTransaction {
}
// * if v1 -> add the domain uuid table
if dbv_id2entry == 1 {
try_audit!(
audit,
self.conn.execute(
self.conn
.execute(
"CREATE TABLE IF NOT EXISTS db_did (
id INTEGER PRIMARY KEY ASC,
data BLOB NOT NULL
)
",
NO_PARAMS,
),
"sqlite error {:?}",
OperationError::SQLiteError
);
)
.map_err(|e| {
ladmin_error!(audit, "sqlite error {:?}", e);
OperationError::SQLiteError
})?;
dbv_id2entry = 2;
ladmin_info!(audit, "dbv_id2entry migrated (db_did) -> {}", dbv_id2entry);
}
// * if v2 -> add the op max ts table.
if dbv_id2entry == 2 {
try_audit!(
audit,
self.conn.execute(
self.conn
.execute(
"CREATE TABLE IF NOT EXISTS db_op_ts (
id INTEGER PRIMARY KEY ASC,
data BLOB NOT NULL
)
",
NO_PARAMS,
),
"sqlite error {:?}",
OperationError::SQLiteError
);
)
.map_err(|e| {
ladmin_error!(audit, "sqlite error {:?}", e);
OperationError::SQLiteError
})?;
dbv_id2entry = 3;
ladmin_info!(
audit,
@ -1167,12 +1158,11 @@ impl IdlSqliteWriteTransaction {
}
// * if v4 -> complete.
try_audit!(
audit,
self.set_db_version_key(DBV_ID2ENTRY, dbv_id2entry),
"sqlite error {:?}",
OperationError::SQLiteError
);
self.set_db_version_key(DBV_ID2ENTRY, dbv_id2entry)
.map_err(|e| {
ladmin_error!(audit, "sqlite error {:?}", e);
OperationError::SQLiteError
})?;
// NOTE: Indexing is configured in a different step!
// Indexing uses a db version flag to represent the version
@ -1226,7 +1216,7 @@ mod tests {
#[test]
fn test_idl_sqlite_verify() {
let mut audit = AuditScope::new("run_test", uuid::Uuid::new_v4());
let mut audit = AuditScope::new("run_test", uuid::Uuid::new_v4(), None);
let be = IdlSqlite::new(&mut audit, "", 1).unwrap();
let be_w = be.write();
let r = be_w.verify();

View file

@ -82,7 +82,7 @@ pub trait BackendTransaction {
filt: &FilterResolved,
thres: usize,
) -> Result<(IDL, FilterPlan), OperationError> {
let fr = Ok(match filt {
Ok(match filt {
FilterResolved::Eq(attr, value, idx) => {
if *idx {
// Get the idx_key
@ -176,7 +176,7 @@ pub trait BackendTransaction {
(IDL::ALLIDS, fp) => {
plan.push(fp);
// If we find anything unindexed, the whole term is unindexed.
lfilter_error!(au, "Term {:?} is ALLIDS, shortcut return", f);
lfilter!(au, "Term {:?} is ALLIDS, shortcut return", f);
let setplan = FilterPlan::OrUnindexed(plan);
return Ok((IDL::ALLIDS, setplan));
}
@ -234,7 +234,7 @@ pub trait BackendTransaction {
if idl.len() < thres && f_rem_count > 0 {
let setplan = FilterPlan::AndPartialThreshold(plan);
return Ok((IDL::PartialThreshold(idl.clone()), setplan));
} else if idl.len() == 0 {
} else if idl.is_empty() {
// Regardless of the input state, if it's empty, this can never
// be satisfied, so return we are indexed and complete.
let setplan = FilterPlan::AndEmptyCand(plan);
@ -256,7 +256,7 @@ pub trait BackendTransaction {
// When below thres, we have to return partials to trigger the entry_no_match_filter check.
let setplan = FilterPlan::AndPartialThreshold(plan);
return Ok((IDL::PartialThreshold(r), setplan));
} else if r.len() == 0 {
} else if r.is_empty() {
// Regardless of the input state, if it's empty, this can never
// be satisfied, so return we are indexed and complete.
let setplan = FilterPlan::AndEmptyCand(plan);
@ -400,9 +400,7 @@ pub trait BackendTransaction {
);
(IDL::Indexed(IDLBitRange::new()), FilterPlan::Invalid)
}
});
// debug!("result of {:?} -> {:?}", filt, fr);
fr
})
}
// Take filter, and AuditScope ref?
@ -413,21 +411,25 @@ pub trait BackendTransaction {
) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> {
// Unlike DS, even if we don't get the index back, we can just pass
// to the in-memory filter test and be done.
lperf_segment!(au, "be::search", || {
lperf_trace_segment!(au, "be::search", || {
// Do a final optimise of the filter
lfilter!(au, "filter unoptimised form --> {:?}", filt);
let filt = lperf_segment!(au, "be::search<filt::optimise>", || { filt.optimise() });
let filt =
lperf_trace_segment!(au, "be::search<filt::optimise>", || { filt.optimise() });
lfilter!(au, "filter optimised to --> {:?}", filt);
// Using the indexes, resolve the IDL here, or ALLIDS.
// Also get if the filter was 100% resolved or not.
let (idl, fplan) = lperf_segment!(au, "be::search -> filter2idl", || {
let (idl, fplan) = lperf_trace_segment!(au, "be::search -> filter2idl", || {
self.filter2idl(au, filt.to_inner(), FILTER_SEARCH_TEST_THRESHOLD)
})?;
lfilter!(au, "filter executed plan -> {:?}", fplan);
lfilter_info!(au, "filter executed plan -> {:?}", fplan);
let entries = try_audit!(au, self.get_idlayer().get_identry(au, &idl));
let entries = self.get_idlayer().get_identry(au, &idl).map_err(|e| {
ladmin_error!(au, "get_identry failed {:?}", e);
e
})?;
// Do other things
// Now, de-serialise the raw_entries back to entries, and populate their ID's
@ -435,11 +437,7 @@ pub trait BackendTransaction {
let entries_filtered = match idl {
IDL::ALLIDS | IDL::Partial(_) => {
lfilter_error!(
au,
"filter (search) was partially or fully unindexed. {:?}",
filt
);
lfilter_error!(au, "filter (search) was partially or fully unindexed.",);
lperf_segment!(au, "be::search<entry::ftest::allids>", || {
entries
.into_iter()
@ -448,7 +446,7 @@ pub trait BackendTransaction {
})
}
IDL::PartialThreshold(_) => {
lperf_segment!(au, "be::search<entry::ftest::thresh>", || {
lperf_trace_segment!(au, "be::search<entry::ftest::thresh>", || {
entries
.into_iter()
.filter(|e| e.entry_match_no_index(&filt))
@ -493,7 +491,7 @@ pub trait BackendTransaction {
au: &mut AuditScope,
filt: &Filter<FilterValidResolved>,
) -> Result<bool, OperationError> {
lperf_segment!(au, "be::exists", || {
lperf_trace_segment!(au, "be::exists", || {
// Do a final optimise of the filter
lfilter!(au, "filter unoptimised form --> {:?}", filt);
let filt = filt.optimise();
@ -501,18 +499,21 @@ pub trait BackendTransaction {
// Using the indexes, resolve the IDL here, or ALLIDS.
// Also get if the filter was 100% resolved or not.
let (idl, fplan) = lperf_segment!(au, "be::exists -> filter2idl", || {
let (idl, fplan) = lperf_trace_segment!(au, "be::exists -> filter2idl", || {
self.filter2idl(au, filt.to_inner(), FILTER_EXISTS_TEST_THRESHOLD)
})?;
lfilter!(au, "filter executed plan -> {:?}", fplan);
lfilter_info!(au, "filter executed plan -> {:?}", fplan);
// Now, check the idl -- if it's fully resolved, we can skip this because the query
// was fully indexed.
match &idl {
IDL::Indexed(idl) => Ok(idl.len() > 0),
IDL::Indexed(idl) => Ok(!idl.is_empty()),
IDL::PartialThreshold(_) => {
let entries = try_audit!(au, self.get_idlayer().get_identry(au, &idl));
let entries = self.get_idlayer().get_identry(au, &idl).map_err(|e| {
ladmin_error!(au, "get_identry failed {:?}", e);
e
})?;
// if not 100% resolved query, apply the filter test.
let entries_filtered: Vec<_> = entries
@ -523,12 +524,11 @@ pub trait BackendTransaction {
Ok(!entries_filtered.is_empty())
}
_ => {
lfilter_error!(
au,
"filter (exists) was partially or fully unindexed {:?}",
filt
);
let entries = try_audit!(au, self.get_idlayer().get_identry(au, &idl));
lfilter_error!(au, "filter (exists) was partially or fully unindexed",);
let entries = self.get_idlayer().get_identry(au, &idl).map_err(|e| {
ladmin_error!(au, "get_identry failed {:?}", e);
e
})?;
// if not 100% resolved query, apply the filter test.
let entries_filtered: Vec<_> = entries
@ -563,25 +563,17 @@ pub trait BackendTransaction {
let entries = entries?;
let serialized_entries = serde_json::to_string_pretty(&entries);
let serialized_entries_str = try_audit!(
audit,
serialized_entries,
"serde error {:?}",
let serialized_entries_str = serde_json::to_string_pretty(&entries).map_err(|e| {
ladmin_error!(audit, "serde error {:?}", e);
OperationError::SerdeJsonError
);
})?;
let result = fs::write(dst_path, serialized_entries_str);
try_audit!(
audit,
result,
"fs::write error {:?}",
OperationError::FsError
);
Ok(())
fs::write(dst_path, serialized_entries_str)
.map(|_| ())
.map_err(|e| {
ladmin_error!(audit, "fs::write error {:?}", e);
OperationError::FsError
})
}
fn name2uuid(
@ -631,7 +623,7 @@ impl<'a> BackendWriteTransaction<'a> {
au: &mut AuditScope,
entries: Vec<Entry<EntrySealed, EntryNew>>,
) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> {
lperf_segment!(au, "be::create", || {
lperf_trace_segment!(au, "be::create", || {
if entries.is_empty() {
ladmin_error!(
au,
@ -670,7 +662,7 @@ impl<'a> BackendWriteTransaction<'a> {
pre_entries: &[Entry<EntrySealed, EntryCommitted>],
post_entries: &[Entry<EntrySealed, EntryCommitted>],
) -> Result<(), OperationError> {
lperf_segment!(au, "be::modify", || {
lperf_trace_segment!(au, "be::modify", || {
if post_entries.is_empty() || pre_entries.is_empty() {
ladmin_error!(
au,
@ -729,7 +721,7 @@ impl<'a> BackendWriteTransaction<'a> {
au: &mut AuditScope,
entries: &[Entry<EntrySealed, EntryCommitted>],
) -> Result<(), OperationError> {
lperf_segment!(au, "be::delete", || {
lperf_trace_segment!(au, "be::delete", || {
if entries.is_empty() {
ladmin_error!(
au,
@ -816,9 +808,8 @@ impl<'a> BackendWriteTransaction<'a> {
ltrace!(audit, "!uuid_same u2r_act -> {:?}", u2r_act);
// Write the changes out to the backend
match n2u_rem {
Some(rem) => self.idlayer.write_name2uuid_rem(audit, rem)?,
None => {}
if let Some(rem) = n2u_rem {
self.idlayer.write_name2uuid_rem(audit, rem)?
}
match u2s_act {
@ -851,14 +842,11 @@ impl<'a> BackendWriteTransaction<'a> {
ltrace!(audit, "u2r_act -> {:?}", u2r_act);
// Write the changes out to the backend
match n2u_add {
Some(add) => self.idlayer.write_name2uuid_add(audit, e_uuid, add)?,
None => {}
if let Some(add) = n2u_add {
self.idlayer.write_name2uuid_add(audit, e_uuid, add)?
}
match n2u_rem {
Some(rem) => self.idlayer.write_name2uuid_rem(audit, rem)?,
None => {}
if let Some(rem) = n2u_rem {
self.idlayer.write_name2uuid_rem(audit, rem)?
}
match u2s_act {
@ -977,9 +965,12 @@ impl<'a> BackendWriteTransaction<'a> {
let dbv = self.get_db_index_version();
ladmin_info!(audit, "upgrade_reindex -> dbv: {} v: {}", dbv, v);
if dbv < v {
eprintln!("NOTICE: A system reindex is required. This may take a long time ...");
limmediate_warning!(
audit,
"NOTICE: A system reindex is required. This may take a long time ...\n"
);
self.reindex(audit)?;
eprintln!("NOTICE: System reindex complete");
limmediate_warning!(audit, "NOTICE: System reindex complete\n");
self.set_db_index_version(v)
} else {
Ok(())
@ -997,25 +988,29 @@ impl<'a> BackendWriteTransaction<'a> {
// Future idea: Do this in batches of X amount to limit memory
// consumption.
let idl = IDL::ALLIDS;
let entries = try_audit!(audit, self.idlayer.get_identry(audit, &idl));
let entries = self.idlayer.get_identry(audit, &idl).map_err(|e| {
ladmin_error!(audit, "get_identry failure {:?}", e);
e
})?;
let mut count = 0;
try_audit!(
audit,
entries
.iter()
.try_for_each(|e| {
entries
.iter()
.try_for_each(|e| {
count += 1;
if count % 1000 == 0 {
eprint!("{}", count);
} else if count % 100 == 0 {
eprint!(".");
if count % 2500 == 0 {
limmediate_warning!(audit, "{}", count);
} else if count % 250 == 0 {
limmediate_warning!(audit, ".");
}
self.entry_index(audit, None, Some(e))
})
);
eprintln!("");
.map_err(|e| {
ladmin_error!(audit, "reindex failed -> {:?}", e);
e
})?;
limmediate_warning!(audit, " reindexed {} entries ✅\n", count);
Ok(())
}
@ -1042,26 +1037,23 @@ impl<'a> BackendWriteTransaction<'a> {
) -> Result<(), OperationError> {
// load all entries into RAM, may need to change this later
// if the size of the database compared to RAM is an issue
let serialized_string_option = fs::read_to_string(src_path);
let serialized_string = try_audit!(
audit,
serialized_string_option,
"fs::read_to_string {:?}",
let serialized_string = fs::read_to_string(src_path).map_err(|e| {
ladmin_error!(audit, "fs::read_to_string {:?}", e);
OperationError::FsError
);
})?;
try_audit!(audit, unsafe { self.idlayer.purge_id2entry(audit) });
unsafe { self.idlayer.purge_id2entry(audit) }.map_err(|e| {
ladmin_error!(audit, "purge_id2entry failed {:?}", e);
e
})?;
let dbentries_option: Result<Vec<DbEntry>, serde_json::Error> =
serde_json::from_str(&serialized_string);
let dbentries = try_audit!(
audit,
dbentries_option,
"serde_json error {:?}",
let dbentries = dbentries_option.map_err(|e| {
ladmin_error!(audit, "serde_json error {:?}", e);
OperationError::SerdeJsonError
);
})?;
// Filter all elements that have a UUID in the system range.
/*
@ -1174,7 +1166,7 @@ impl<'a> BackendWriteTransaction<'a> {
// if none, return ts. If found, return it.
match self.get_idlayer().get_db_ts_max()? {
Some(dts) => Ok(dts),
None => Ok(ts.clone()),
None => Ok(*ts),
}
}
@ -1191,7 +1183,7 @@ impl<'a> BackendWriteTransaction<'a> {
impl Backend {
pub fn new(audit: &mut AuditScope, path: &str, pool_size: u32) -> Result<Self, OperationError> {
// this has a ::memory() type, but will path == "" work?
lperf_segment!(audit, "be::new", || {
lperf_trace_segment!(audit, "be::new", || {
let be = Backend {
idlayer: Arc::new(IdlArcSqlite::new(audit, path, pool_size)?),
};
@ -1223,7 +1215,7 @@ impl Backend {
pub fn write(&self, idxmeta: &BTreeSet<(String, IndexType)>) -> BackendWriteTransaction {
BackendWriteTransaction {
idlayer: self.idlayer.write(),
// TODO: Performance improvement here by NOT cloning the idxmeta.
// TODO #257: Performance improvement here by NOT cloning the idxmeta.
idxmeta: (*idxmeta).clone(),
}
}
@ -1269,7 +1261,7 @@ mod tests {
.is_test(true)
.try_init();
let mut audit = AuditScope::new("run_test", uuid::Uuid::new_v4());
let mut audit = AuditScope::new("run_test", uuid::Uuid::new_v4(), None);
let be = Backend::new(&mut audit, "", 1).expect("Failed to setup backend");

View file

@ -1,6 +1,5 @@
use rand::prelude::*;
use std::fmt;
use std::path::PathBuf;
#[derive(Serialize, Deserialize, Debug)]
pub struct IntegrationTestConfig {
@ -26,6 +25,7 @@ pub struct Configuration {
pub tls_config: Option<TlsConfiguration>,
pub cookie_key: [u8; 32],
pub integration_test_config: Option<Box<IntegrationTestConfig>>,
pub log_level: Option<u32>,
}
impl fmt::Display for Configuration {
@ -65,20 +65,19 @@ impl Configuration {
tls_config: None,
cookie_key: [0; 32],
integration_test_config: None,
log_level: None,
};
let mut rng = StdRng::from_entropy();
rng.fill(&mut c.cookie_key);
c
}
pub fn update_db_path(&mut self, p: &PathBuf) {
match p.to_str() {
Some(p) => self.db_path = p.to_string(),
None => {
error!("Invalid DB path supplied");
std::process::exit(1);
}
}
pub fn update_log_level(&mut self, log_level: Option<u32>) {
self.log_level = log_level;
}
pub fn update_db_path(&mut self, p: &str) {
self.db_path = p.to_string();
}
pub fn update_bind(&mut self, b: &Option<String>) {
@ -92,36 +91,13 @@ impl Configuration {
self.ldapaddress = l.clone();
}
pub fn update_tls(
&mut self,
ca: &Option<PathBuf>,
cert: &Option<PathBuf>,
key: &Option<PathBuf>,
) {
pub fn update_tls(&mut self, ca: &Option<String>, cert: &Option<String>, key: &Option<String>) {
match (ca, cert, key) {
(None, None, None) => {}
(Some(cap), Some(certp), Some(keyp)) => {
let cas = match cap.to_str() {
Some(cav) => cav.to_string(),
None => {
error!("Invalid CA path");
std::process::exit(1);
}
};
let certs = match certp.to_str() {
Some(certv) => certv.to_string(),
None => {
error!("Invalid Cert path");
std::process::exit(1);
}
};
let keys = match keyp.to_str() {
Some(keyv) => keyv.to_string(),
None => {
error!("Invalid Key path");
std::process::exit(1);
}
};
let cas = cap.to_string();
let certs = certp.to_string();
let keys = keyp.to_string();
self.tls_config = Some(TlsConfiguration {
ca: cas,
cert: certs,

View file

@ -124,7 +124,6 @@ pub const JSON_IDM_SELF_ACP_WRITE_V1: &str = r#"{
}"#;
pub const JSON_IDM_ALL_ACP_READ_V1: &str = r#"{
"state": null,
"attrs": {
"class": ["object", "access_control_profile", "access_control_search"],
"name": ["idm_all_acp_read"],

View file

@ -1,8 +1,4 @@
pub const JSON_ADMIN_V1: &str = r#"{
"valid": {
"uuid": "00000000-0000-0000-0000-000000000000"
},
"state": null,
"attrs": {
"class": ["account", "memberof", "object"],
"name": ["admin"],
@ -13,10 +9,6 @@ pub const JSON_ADMIN_V1: &str = r#"{
}"#;
pub const JSON_IDM_ADMIN_V1: &str = r#"{
"valid": {
"uuid": "00000000-0000-0000-0000-000000000018"
},
"state": null,
"attrs": {
"class": ["account", "memberof", "object"],
"name": ["idm_admin"],
@ -27,10 +19,6 @@ pub const JSON_IDM_ADMIN_V1: &str = r#"{
}"#;
pub const JSON_IDM_ADMINS_V1: &str = r#"{
"valid": {
"uuid": "00000000-0000-0000-0000-000000000001"
},
"state": null,
"attrs": {
"class": ["group", "object"],
"name": ["idm_admins"],
@ -41,10 +29,6 @@ pub const JSON_IDM_ADMINS_V1: &str = r#"{
}"#;
pub const JSON_SYSTEM_ADMINS_V1: &str = r#"{
"valid": {
"uuid": "00000000-0000-0000-0000-000000000019"
},
"state": null,
"attrs": {
"class": ["group", "object"],
"name": ["system_admins"],
@ -351,14 +335,9 @@ pub const JSON_ANONYMOUS_V1: &str = r#"{
}
}"#;
// need a domain_trust_info as well.
// TODO
// ============ TEST DATA ============
#[cfg(test)]
pub const JSON_TESTPERSON1: &str = r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["object"],
"name": ["testperson1"],
@ -368,8 +347,6 @@ pub const JSON_TESTPERSON1: &str = r#"{
#[cfg(test)]
pub const JSON_TESTPERSON2: &str = r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["object"],
"name": ["testperson2"],

View file

@ -3,10 +3,6 @@
// system supplementary
pub const JSON_SCHEMA_ATTR_DISPLAYNAME: &str = r#"{
"valid": {
"uuid": "00000000-0000-0000-0000-ffff00000040"
},
"state": null,
"attrs": {
"class": [
"object",
@ -38,10 +34,6 @@ pub const JSON_SCHEMA_ATTR_DISPLAYNAME: &str = r#"{
}"#;
pub const JSON_SCHEMA_ATTR_MAIL: &str = r#"
{
"valid": {
"uuid": "00000000-0000-0000-0000-ffff00000041"
},
"state": null,
"attrs": {
"class": [
"object",
@ -74,10 +66,6 @@ pub const JSON_SCHEMA_ATTR_MAIL: &str = r#"
"#;
pub const JSON_SCHEMA_ATTR_SSH_PUBLICKEY: &str = r#"
{
"valid": {
"uuid": "00000000-0000-0000-0000-ffff00000042"
},
"state": null,
"attrs": {
"class": [
"object",
@ -108,10 +96,6 @@ pub const JSON_SCHEMA_ATTR_SSH_PUBLICKEY: &str = r#"
"#;
pub const JSON_SCHEMA_ATTR_PRIMARY_CREDENTIAL: &str = r#"
{
"valid": {
"uuid": "00000000-0000-0000-0000-ffff00000043"
},
"state": null,
"attrs": {
"class": [
"object",
@ -444,10 +428,6 @@ pub const JSON_SCHEMA_ATTR_NSUNIQUEID: &str = r#"{
pub const JSON_SCHEMA_CLASS_PERSON: &str = r#"
{
"valid": {
"uuid": "00000000-0000-0000-0000-ffff00000044"
},
"state": null,
"attrs": {
"class": [
"object",
@ -477,10 +457,6 @@ pub const JSON_SCHEMA_CLASS_PERSON: &str = r#"
pub const JSON_SCHEMA_CLASS_GROUP: &str = r#"
{
"valid": {
"uuid": "00000000-0000-0000-0000-ffff00000045"
},
"state": null,
"attrs": {
"class": [
"object",

View file

@ -21,7 +21,6 @@ pub const _UUID_IDM_HP_ACCOUNT_MANAGE_PRIV: &str = "00000000-0000-0000-0000-0000
pub const _UUID_IDM_HP_GROUP_MANAGE_PRIV: &str = "00000000-0000-0000-0000-000000000017";
pub const _UUID_IDM_ADMIN_V1: &str = "00000000-0000-0000-0000-000000000018";
pub const _UUID_SYSTEM_ADMINS: &str = "00000000-0000-0000-0000-000000000019";
// TODO
pub const UUID_DOMAIN_ADMINS: &str = "00000000-0000-0000-0000-000000000020";
pub const _UUID_IDM_ACCOUNT_UNIX_EXTEND_PRIV: &str = "00000000-0000-0000-0000-000000000021";
pub const _UUID_IDM_GROUP_UNIX_EXTEND_PRIV: &str = "00000000-0000-0000-0000-000000000022";

View file

@ -87,7 +87,7 @@ where
v.into_iter().for_each(|r| actor.framed.write(r));
}
Ok(None) | Err(_) => {
error!("Internal server error");
eprintln!("Internal server error");
ctx.stop();
}
};
@ -108,12 +108,12 @@ where
// It's queued, we are done.
Ok(_) => {}
Err(_) => {
error!("Too many queue msgs for connection");
eprintln!("Too many queue msgs for connection");
ctx.stop()
}
},
Err(_) => {
error!("Io error");
eprintln!("Io error");
ctx.stop()
}
}
@ -179,11 +179,9 @@ impl Handler<TlsConnect> for LdapServer {
actor.qe_r.clone(),
)
});
()
})
.map_err(|_| {
error!("invalid tls handshake");
()
eprintln!("invalid tls handshake");
})
});
@ -197,8 +195,7 @@ pub(crate) async fn create_ldap_server(
qe_r: Addr<QueryServerReadV1>,
) -> Result<(), ()> {
let addr = net::SocketAddr::from_str(address).map_err(|e| {
error!("Could not parse ldap server address {} -> {:?}", address, e);
()
eprintln!("Could not parse ldap server address {} -> {:?}", address, e);
})?;
let listener = Box::new(TcpListener::bind(&addr).await.unwrap());

View file

@ -241,7 +241,9 @@ async fn json_rest_event_delete_id(
};
match state.qe_w.send(obj).await {
Ok(Ok(r)) => HttpResponse::Ok().header("X-KANIDM-OPID", hvalue).json(r),
Ok(Ok(())) => HttpResponse::Ok()
.header("X-KANIDM-OPID", hvalue)
.json(true),
Ok(Err(e)) => operation_error_to_response(e, hvalue),
Err(_) => HttpResponse::InternalServerError()
.header("X-KANIDM-OPID", hvalue)
@ -270,7 +272,6 @@ async fn json_rest_event_get_id_attr(
match state.qe_r.send(obj).await {
Ok(Ok(mut event_result)) => {
// TODO: Check this only has len 1, even though that satte should be impossible.
// Only get one result
let r = event_result.pop().and_then(|mut e| {
// Only get the attribute as requested.
@ -328,7 +329,9 @@ async fn json_rest_event_post_id_attr(
};
// Add a msg here
match state.qe_w.send(m_obj).await {
Ok(Ok(r)) => HttpResponse::Ok().header("X-KANIDM-OPID", hvalue).json(r),
Ok(Ok(())) => HttpResponse::Ok()
.header("X-KANIDM-OPID", hvalue)
.json(true),
Ok(Err(e)) => operation_error_to_response(e, hvalue),
Err(_) => HttpResponse::InternalServerError()
.header("X-KANIDM-OPID", hvalue)
@ -356,7 +359,9 @@ async fn json_rest_event_put_id_attr(
eventid,
};
match state.qe_w.send(m_obj).await {
Ok(Ok(r)) => HttpResponse::Ok().header("X-KANIDM-OPID", hvalue).json(r),
Ok(Ok(())) => HttpResponse::Ok()
.header("X-KANIDM-OPID", hvalue)
.json(true),
Ok(Err(e)) => operation_error_to_response(e, hvalue),
Err(_) => HttpResponse::InternalServerError()
.header("X-KANIDM-OPID", hvalue)
@ -374,7 +379,8 @@ async fn json_rest_event_delete_id_attr(
let (id, attr) = path.into_inner();
let (eventid, hvalue) = new_eventid!();
// TODO: Attempt to get an option Vec<String> here?
// TODO #211: Attempt to get an option Vec<String> here?
// It's probably better to focus on SCIM instead, it seems richer than this.
let obj = PurgeAttributeMessage {
uat,
uuid_or_name: id,
@ -384,7 +390,9 @@ async fn json_rest_event_delete_id_attr(
};
match state.qe_w.send(obj).await {
Ok(Ok(r)) => HttpResponse::Ok().header("X-KANIDM-OPID", hvalue).json(r),
Ok(Ok(())) => HttpResponse::Ok()
.header("X-KANIDM-OPID", hvalue)
.json(true),
Ok(Err(e)) => operation_error_to_response(e, hvalue),
Err(_) => HttpResponse::InternalServerError()
.header("X-KANIDM-OPID", hvalue)
@ -560,6 +568,8 @@ async fn account_id_get_attr(
json_rest_event_get_id_attr(path, session, state, filter).await
}
// Matches actix-web styles
#[allow(clippy::type_complexity)]
async fn account_id_post_attr(
(values, path, session, state): (
Json<Vec<String>>,
@ -579,6 +589,8 @@ async fn account_id_delete_attr(
json_rest_event_delete_id_attr(path, session, state, filter).await
}
// Matches actix-web styles
#[allow(clippy::type_complexity)]
async fn account_id_put_attr(
(values, path, session, state): (
Json<Vec<String>>,
@ -633,6 +645,8 @@ async fn account_get_id_ssh_pubkeys(
}
}
// Matches actix-web styles
#[allow(clippy::type_complexity)]
async fn account_post_id_ssh_pubkey(
(obj, path, session, state): (
Json<(String, String)>,
@ -656,7 +670,9 @@ async fn account_post_id_ssh_pubkey(
};
// Add a msg here
match state.qe_w.send(m_obj).await {
Ok(Ok(r)) => HttpResponse::Ok().header("X-KANIDM-OPID", hvalue).json(r),
Ok(Ok(())) => HttpResponse::Ok()
.header("X-KANIDM-OPID", hvalue)
.json(true),
Ok(Err(e)) => operation_error_to_response(e, hvalue),
Err(_) => HttpResponse::InternalServerError()
.header("X-KANIDM-OPID", hvalue)
@ -704,7 +720,9 @@ async fn account_delete_id_ssh_pubkey_tag(
};
match state.qe_w.send(obj).await {
Ok(Ok(r)) => HttpResponse::Ok().header("X-KANIDM-OPID", hvalue).json(r),
Ok(Ok(())) => HttpResponse::Ok()
.header("X-KANIDM-OPID", hvalue)
.json(true),
Ok(Err(e)) => operation_error_to_response(e, hvalue),
Err(_) => HttpResponse::InternalServerError()
.header("X-KANIDM-OPID", hvalue)
@ -797,7 +815,9 @@ async fn account_post_id_person_extend(
eventid,
};
match state.qe_w.send(m_obj).await {
Ok(Ok(r)) => HttpResponse::Ok().header("X-KANIDM-OPID", hvalue).json(r),
Ok(Ok(())) => HttpResponse::Ok()
.header("X-KANIDM-OPID", hvalue)
.json(true),
Ok(Err(e)) => operation_error_to_response(e, hvalue),
Err(_) => HttpResponse::InternalServerError()
.header("X-KANIDM-OPID", hvalue)
@ -818,7 +838,9 @@ async fn account_post_id_unix(
let (eventid, hvalue) = new_eventid!();
let m_obj = IdmAccountUnixExtendMessage::new(uat, id, obj.into_inner(), eventid);
match state.qe_w.send(m_obj).await {
Ok(Ok(r)) => HttpResponse::Ok().header("X-KANIDM-OPID", hvalue).json(r),
Ok(Ok(())) => HttpResponse::Ok()
.header("X-KANIDM-OPID", hvalue)
.json(true),
Ok(Err(e)) => operation_error_to_response(e, hvalue),
Err(_) => HttpResponse::InternalServerError()
.header("X-KANIDM-OPID", hvalue)
@ -892,7 +914,9 @@ async fn account_put_id_unix_credential(
eventid,
};
match state.qe_w.send(m_obj).await {
Ok(Ok(r)) => HttpResponse::Ok().header("X-KANIDM-OPID", hvalue).json(r),
Ok(Ok(())) => HttpResponse::Ok()
.header("X-KANIDM-OPID", hvalue)
.json(true),
Ok(Err(e)) => operation_error_to_response(e, hvalue),
Err(_) => HttpResponse::InternalServerError()
.header("X-KANIDM-OPID", hvalue)
@ -916,7 +940,9 @@ async fn account_delete_id_unix_credential(
};
match state.qe_w.send(obj).await {
Ok(Ok(r)) => HttpResponse::Ok().header("X-KANIDM-OPID", hvalue).json(r),
Ok(Ok(())) => HttpResponse::Ok()
.header("X-KANIDM-OPID", hvalue)
.json(true),
Ok(Err(e)) => operation_error_to_response(e, hvalue),
Err(_) => HttpResponse::InternalServerError()
.header("X-KANIDM-OPID", hvalue)
@ -950,6 +976,8 @@ async fn group_id_get_attr(
json_rest_event_get_id_attr(path, session, state, filter).await
}
// Matches actix-web styles
#[allow(clippy::type_complexity)]
async fn group_id_post_attr(
(values, path, session, state): (
Json<Vec<String>>,
@ -969,6 +997,8 @@ async fn group_id_delete_attr(
json_rest_event_delete_id_attr(path, session, state, filter).await
}
// Matches actix-web styles
#[allow(clippy::type_complexity)]
async fn group_id_put_attr(
(values, path, session, state): (
Json<Vec<String>>,
@ -996,7 +1026,9 @@ async fn group_post_id_unix(
let (eventid, hvalue) = new_eventid!();
let m_obj = IdmGroupUnixExtendMessage::new(uat, id, obj.into_inner(), eventid);
match state.qe_w.send(m_obj).await {
Ok(Ok(r)) => HttpResponse::Ok().header("X-KANIDM-OPID", hvalue).json(r),
Ok(Ok(())) => HttpResponse::Ok()
.header("X-KANIDM-OPID", hvalue)
.json(true),
Ok(Err(e)) => operation_error_to_response(e, hvalue),
Err(_) => HttpResponse::InternalServerError()
.header("X-KANIDM-OPID", hvalue)
@ -1045,6 +1077,8 @@ async fn domain_id_get_attr(
json_rest_event_get_id_attr(path, session, state, filter).await
}
// Matches actix-web styles
#[allow(clippy::type_complexity)]
async fn domain_id_put_attr(
(values, path, session, state): (
Json<Vec<String>>,
@ -1118,7 +1152,7 @@ async fn recycle_bin_revive_id_post(
eventid,
};
match state.qe_w.send(m_obj).await {
Ok(Ok(r)) => HttpResponse::Ok().header("X-KANIDM-OPID", hvalue).json(r),
Ok(Ok(())) => HttpResponse::Ok().header("X-KANIDM-OPID", hvalue).json(()),
Ok(Err(e)) => operation_error_to_response(e, hvalue),
Err(_) => HttpResponse::InternalServerError()
.header("X-KANIDM-OPID", hvalue)
@ -1230,7 +1264,7 @@ async fn status((_session, state): (Session, Data<AppState>)) -> HttpResponse {
// === internal setup helpers
fn setup_backend(config: &Configuration) -> Result<Backend, OperationError> {
let mut audit_be = AuditScope::new("backend_setup", uuid::Uuid::new_v4());
let mut audit_be = AuditScope::new("backend_setup", uuid::Uuid::new_v4(), config.log_level);
let pool_size: u32 = config.threads as u32;
let be = Backend::new(&mut audit_be, config.db_path.as_str(), pool_size);
// debug!
@ -1285,7 +1319,7 @@ pub fn backup_server_core(config: Configuration, dst_path: &str) {
return;
}
};
let mut audit = AuditScope::new("backend_backup", uuid::Uuid::new_v4());
let mut audit = AuditScope::new("backend_backup", uuid::Uuid::new_v4(), config.log_level);
let mut be_ro_txn = be.read();
let r = be_ro_txn.backup(&mut audit, dst_path);
@ -1308,7 +1342,7 @@ pub fn restore_server_core(config: Configuration, dst_path: &str) {
return;
}
};
let mut audit = AuditScope::new("backend_restore", uuid::Uuid::new_v4());
let mut audit = AuditScope::new("backend_restore", uuid::Uuid::new_v4(), config.log_level);
// First, we provide the in-memory schema so that core attrs are indexed correctly.
let schema = match Schema::new(&mut audit) {
@ -1377,18 +1411,18 @@ pub fn reindex_server_core(config: Configuration) {
return;
}
};
let mut audit = AuditScope::new("server_reindex", uuid::Uuid::new_v4());
let mut audit = AuditScope::new("server_reindex", uuid::Uuid::new_v4(), config.log_level);
// First, we provide the in-memory schema so that core attrs are indexed correctly.
let schema = match Schema::new(&mut audit) {
Ok(s) => s,
Err(e) => {
error!("Failed to setup in memory schema: {:?}", e);
eprintln!("Failed to setup in memory schema: {:?}", e);
std::process::exit(1);
}
};
info!("Start Index Phase 1 ...");
eprintln!("Start Index Phase 1 ...");
// Reindex only the core schema attributes to bootstrap the process.
let mut be_wr_txn = {
// Limit the scope of the schema txn.
@ -1403,12 +1437,15 @@ pub fn reindex_server_core(config: Configuration) {
// Now that's done, setup a minimal qs and reindex from that.
if r.is_err() {
audit.write_log();
error!("Failed to reindex database: {:?}", r);
eprintln!("Failed to reindex database: {:?}", r);
std::process::exit(1);
}
info!("Index Phase 1 Success!");
eprintln!("Index Phase 1 Success!");
info!("Attempting to init query server ...");
audit.write_log();
let mut audit = AuditScope::new("server_reindex", uuid::Uuid::new_v4(), config.log_level);
eprintln!("Attempting to init query server ...");
let (qs, _idms) = match setup_qs_idms(&mut audit, be) {
Ok(t) => t,
@ -1418,9 +1455,12 @@ pub fn reindex_server_core(config: Configuration) {
return;
}
};
info!("Init Query Server Success!");
eprintln!("Init Query Server Success!");
info!("Start Index Phase 2 ...");
audit.write_log();
let mut audit = AuditScope::new("server_reindex", uuid::Uuid::new_v4(), config.log_level);
eprintln!("Start Index Phase 2 ...");
let mut qs_write = qs.write(duration_from_epoch_now());
let r = qs_write
@ -1430,16 +1470,16 @@ pub fn reindex_server_core(config: Configuration) {
audit.write_log();
match r {
Ok(_) => info!("Index Phase 2 Success!"),
Ok(_) => eprintln!("Index Phase 2 Success!"),
Err(e) => {
error!("Reindex failed: {:?}", e);
eprintln!("Reindex failed: {:?}", e);
std::process::exit(1);
}
};
}
pub fn domain_rename_core(config: Configuration, new_domain_name: String) {
let mut audit = AuditScope::new("domain_rename", uuid::Uuid::new_v4());
let mut audit = AuditScope::new("domain_rename", uuid::Uuid::new_v4(), config.log_level);
// Start the backend.
let be = match setup_backend(&config) {
@ -1491,7 +1531,7 @@ pub fn reset_sid_core(config: Configuration) {
*/
pub fn verify_server_core(config: Configuration) {
let mut audit = AuditScope::new("server_verify", uuid::Uuid::new_v4());
let mut audit = AuditScope::new("server_verify", uuid::Uuid::new_v4(), config.log_level);
// Setup the be
let be = match setup_backend(&config) {
Ok(be) => be,
@ -1529,7 +1569,7 @@ pub fn verify_server_core(config: Configuration) {
}
pub fn recover_account_core(config: Configuration, name: String, password: String) {
let mut audit = AuditScope::new("recover_account", uuid::Uuid::new_v4());
let mut audit = AuditScope::new("recover_account", uuid::Uuid::new_v4(), config.log_level);
// Start the backend.
let be = match setup_backend(&config) {
@ -1585,7 +1625,7 @@ pub async fn create_server_core(config: Configuration) -> Result<ServerCtx, ()>
let log_thread = thread::spawn(move || async_log::run(log_rx));
// Start the status tracking thread
let status_addr = StatusActor::start(log_tx.clone());
let status_addr = StatusActor::start(log_tx.clone(), config.log_level);
// Setup TLS (if any)
let opt_tls_params = match setup_tls(&config) {
@ -1608,7 +1648,7 @@ pub async fn create_server_core(config: Configuration) -> Result<ServerCtx, ()>
}
};
let mut audit = AuditScope::new("setup_qs_idms", uuid::Uuid::new_v4());
let mut audit = AuditScope::new("setup_qs_idms", uuid::Uuid::new_v4(), config.log_level);
// Start the IDM server.
let (qs, idms) = match setup_qs_idms(&mut audit, be) {
Ok(t) => t,
@ -1671,13 +1711,15 @@ pub async fn create_server_core(config: Configuration) -> Result<ServerCtx, ()>
// Start the read query server with the given be path: future config
let server_read_addr = QueryServerReadV1::start(
log_tx.clone(),
config.log_level,
qs.clone(),
idms_arc.clone(),
ldap_arc.clone(),
config.threads,
);
// Start the write thread
let server_write_addr = QueryServerWriteV1::start(log_tx.clone(), qs, idms_arc);
let server_write_addr =
QueryServerWriteV1::start(log_tx.clone(), config.log_level, qs, idms_arc);
// Setup timed events associated to the write thread
let _int_addr = IntervalActor::new(server_write_addr.clone()).start();
@ -1738,7 +1780,7 @@ pub async fn create_server_core(config: Configuration) -> Result<ServerCtx, ()>
.app_data(
web::JsonConfig::default()
// Currently 4MB
.limit(4194304)
.limit(4_194_304)
.error_handler(|err, _req| {
let s = format!("{}", err);
error::InternalError::from_response(err, HttpResponse::BadRequest().json(s))

View file

@ -20,7 +20,7 @@ pub enum Policy {
}
*/
// TODO: Determine this at startup based on a time factor
// TODO #255: Determine this at startup based on a time factor
const PBKDF2_COST: usize = 10000;
// NIST 800-63.b salt should be 112 bits -> 14 8u8.
// I choose tinfoil hat though ...
@ -59,6 +59,8 @@ impl TryFrom<DbPasswordV1> for Password {
impl TryFrom<&str> for Password {
type Error = ();
// As we may add more algos, we keep the match algo single for later.
#[allow(clippy::single_match)]
fn try_from(value: &str) -> Result<Self, Self::Error> {
// There is probably a more efficent way to try this given different types?
@ -72,7 +74,7 @@ impl TryFrom<&str> for Password {
match algo {
"pbkdf2_sha256" => {
let c = usize::from_str_radix(cost, 10).map_err(|_| ())?;
let s: Vec<_> = salt.as_bytes().iter().map(|b| *b).collect();
let s: Vec<_> = salt.as_bytes().to_vec();
let h = base64::decode(hash).map_err(|_| ())?;
if h.len() < PBKDF2_IMPORT_MIN_LEN {
return Err(());
@ -170,7 +172,7 @@ pub struct Credential {
// Uuid of Credential, used by auth session to lock this specific credential
// if required.
pub(crate) uuid: Uuid,
// TODO: Add auth policy IE validUntil, lock state ...
// TODO #59: Add auth policy IE validUntil, lock state ...
// locked: bool
}
@ -246,7 +248,7 @@ impl Credential {
password: Some(pw),
totp: self.totp.clone(),
claims: self.claims.clone(),
uuid: self.uuid.clone(),
uuid: self.uuid,
}
}
@ -256,7 +258,7 @@ impl Credential {
password: self.password.clone(),
totp: Some(totp),
claims: self.claims.clone(),
uuid: self.uuid.clone(),
uuid: self.uuid,
}
}

View file

@ -126,7 +126,7 @@ impl TOTP {
DbTotpV1 {
l: self.label.clone(),
k: self.secret.clone(),
s: self.step.clone(),
s: self.step,
a: match self.algo {
TOTPAlgo::Sha1 => DbTotpAlgoV1::S1,
TOTPAlgo::Sha256 => DbTotpAlgoV1::S256,
@ -184,7 +184,7 @@ impl TOTP {
.replace("%3A", "")
.replace(" ", "%20"),
secret: self.secret.clone(),
step: self.step.clone(),
step: self.step,
algo: match self.algo {
TOTPAlgo::Sha1 => ProtoTOTPAlgo::Sha1,
TOTPAlgo::Sha256 => ProtoTOTPAlgo::Sha256,

View file

@ -268,10 +268,12 @@ pub struct Entry<VALID, STATE> {
impl<VALID, STATE> std::fmt::Debug for Entry<VALID, STATE>
where
STATE: std::fmt::Debug,
VALID: std::fmt::Debug,
{
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
f.debug_struct("Entry<EntrySealed, _>")
.field("state", &self.state)
.field("valid", &self.valid)
.finish()
}
}
@ -332,10 +334,11 @@ impl Entry<EntryInit, EntryNew> {
.attrs
.iter()
.map(|(k, v)| {
let nk = qs.get_schema().normalise_attr_name(k);
let nv: Result<BTreeSet<Value>, _> =
v.iter().map(|vr| qs.clone_value(audit, &k, vr)).collect();
v.iter().map(|vr| qs.clone_value(audit, &nk, vr)).collect();
match nv {
Ok(nvi) => Ok((k.clone(), nvi)),
Ok(nvi) => Ok((nk, nvi)),
Err(e) => Err(e),
}
})
@ -366,13 +369,10 @@ impl Entry<EntryInit, EntryNew> {
}
}
// str -> Proto entry
let pe: ProtoEntry = try_audit!(
audit,
serde_json::from_str(es).map_err(|e| {
ladmin_error!(audit, "SerdeJson Failure -> {:?}", e);
OperationError::SerdeJsonError
})
);
let pe: ProtoEntry = serde_json::from_str(es).map_err(|e| {
ladmin_error!(audit, "SerdeJson Failure -> {:?}", e);
OperationError::SerdeJsonError
})?;
// now call from_proto_entry
Self::from_proto_entry(audit, &pe, qs)
}
@ -624,7 +624,7 @@ impl<STATE> Entry<EntryInvalid, STATE> {
}
});
if invalid_classes.len() != 0 {
if !invalid_classes.is_empty() {
// lrequest_error!("Class on entry not found in schema?");
return Err(SchemaError::InvalidClass(invalid_classes));
};
@ -664,7 +664,7 @@ impl<STATE> Entry<EntryInvalid, STATE> {
}
});
if missing_must.len() != 0 {
if !missing_must.is_empty() {
return Err(SchemaError::MissingMustAttribute(missing_must));
}
@ -796,7 +796,7 @@ impl Entry<EntryInvalid, EntryCommitted> {
}
}
pub fn to_recycled(mut self) -> Self {
pub fn into_recycled(mut self) -> Self {
self.add_ava("class", &Value::new_class("recycled"));
Entry {
@ -918,7 +918,7 @@ impl<VALID> Entry<VALID, EntryCommitted> {
}
impl<STATE> Entry<EntrySealed, STATE> {
pub fn to_init(self) -> Entry<EntryInit, STATE> {
pub fn into_init(self) -> Entry<EntryInit, STATE> {
Entry {
valid: EntryInit,
state: self.state,
@ -981,14 +981,14 @@ impl Entry<EntrySealed, EntryCommitted> {
}
#[inline]
fn get_uuid2spn(&self) -> Value {
pub(crate) fn get_uuid2spn(&self) -> Value {
self.attrs
.get("spn")
.and_then(|vs| vs.iter().take(1).next().map(|v| v.clone()))
.and_then(|vs| vs.iter().take(1).next().cloned())
.or_else(|| {
self.attrs
.get("name")
.and_then(|vs| vs.iter().take(1).next().map(|v| v.clone()))
.and_then(|vs| vs.iter().take(1).next().cloned())
})
.unwrap_or_else(|| Value::new_uuidr(self.get_uuid()))
}
@ -1415,7 +1415,7 @@ impl Entry<EntrySealed, EntryCommitted> {
}
}
pub fn to_valid(self, cid: Cid) -> Entry<EntryValid, EntryCommitted> {
pub fn into_valid(self, cid: Cid) -> Entry<EntryValid, EntryCommitted> {
Entry {
valid: EntryValid {
uuid: self.valid.uuid,
@ -1561,13 +1561,6 @@ impl<VALID, STATE> Entry<VALID, STATE> {
let _ = self.attrs.insert("last_modified_cid".to_string(), cv);
}
/*
* WARNING: Should these TODO move to EntryValid only?
* I've tried to do this once, but the issue is that there
* is a lot of code in normalised and other states that
* relies on the ability to get ava. I think we may not be
* able to do so "easily".
*/
pub fn get_ava(&self, attr: &str) -> Option<Vec<&Value>> {
match self.attrs.get(attr) {
Some(vs) => {

View file

@ -78,7 +78,7 @@ impl From<&EventOrigin> for EventOriginId {
fn from(event: &EventOrigin) -> Self {
match event {
EventOrigin::Internal => EventOriginId::Internal,
EventOrigin::User(e) => EventOriginId::User(e.get_uuid().clone()),
EventOrigin::User(e) => EventOriginId::User(*e.get_uuid()),
}
}
}
@ -101,17 +101,37 @@ pub struct Event {
pub origin: EventOrigin,
}
impl std::fmt::Display for Event {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match &self.origin {
EventOrigin::Internal => write!(f, "Internal"),
EventOrigin::User(e) => {
let nv = e.get_uuid2spn();
write!(
f,
"User( {}, {} ) ",
nv.to_proto_string_clone(),
e.get_uuid().to_hyphenated_ref()
)
}
}
}
}
impl Event {
pub fn from_ro_request(
audit: &mut AuditScope,
qs: &mut QueryServerReadTransaction,
user_uuid: &Uuid,
) -> Result<Self, OperationError> {
let e = try_audit!(audit, qs.internal_search_uuid(audit, &user_uuid));
Ok(Event {
origin: EventOrigin::User(e),
})
qs.internal_search_uuid(audit, &user_uuid)
.map(|e| Event {
origin: EventOrigin::User(e),
})
.map_err(|e| {
ladmin_error!(audit, "from_ro_request failed {:?}", e);
e
})
}
pub fn from_ro_uat(
@ -121,12 +141,15 @@ impl Event {
) -> Result<Self, OperationError> {
ltrace!(audit, "from_ro_uat -> {:?}", uat);
let uat = uat.ok_or(OperationError::NotAuthenticated)?;
let u = try_audit!(
audit,
Uuid::parse_str(uat.uuid.as_str()).map_err(|_| OperationError::InvalidUuid)
);
let u = Uuid::parse_str(uat.uuid.as_str()).map_err(|_| {
ladmin_error!(audit, "from_ro_uat invalid uat uuid");
OperationError::InvalidUuid
})?;
let e = try_audit!(audit, qs.internal_search_uuid(audit, &u));
let e = qs.internal_search_uuid(audit, &u).map_err(|e| {
ladmin_error!(audit, "from_ro_uat failed {:?}", e);
e
})?;
// TODO #64: Now apply claims from the uat into the Entry
// to allow filtering.
@ -142,12 +165,15 @@ impl Event {
) -> Result<Self, OperationError> {
ltrace!(audit, "from_rw_uat -> {:?}", uat);
let uat = uat.ok_or(OperationError::NotAuthenticated)?;
let u = try_audit!(
audit,
Uuid::parse_str(uat.uuid.as_str()).map_err(|_| OperationError::InvalidUuid)
);
let u = Uuid::parse_str(uat.uuid.as_str()).map_err(|_| {
ladmin_error!(audit, "from_rw_uat invalid uat uuid");
OperationError::InvalidUuid
})?;
let e = try_audit!(audit, qs.internal_search_uuid(audit, &u));
let e = qs.internal_search_uuid(audit, &u).map_err(|e| {
ladmin_error!(audit, "from_rw_uat failed {:?}", e);
e
})?;
// TODO #64: Now apply claims from the uat into the Entry
// to allow filtering.
@ -165,11 +191,14 @@ impl Event {
// In the future, probably yes.
//
// For now, no.
let u = try_audit!(
audit,
Uuid::parse_str(user_uuid).map_err(|_| OperationError::InvalidUuid)
);
let e = try_audit!(audit, qs.internal_search_uuid(audit, &u));
let u = Uuid::parse_str(user_uuid).map_err(|_| {
ladmin_error!(audit, "from_ro_request invalid uat uuid");
OperationError::InvalidUuid
})?;
let e = qs.internal_search_uuid(audit, &u).map_err(|e| {
ladmin_error!(audit, "from_rw_request failed {:?}", e);
e
})?;
Ok(Event {
origin: EventOrigin::User(e),
@ -341,7 +370,6 @@ impl SearchEvent {
filter_orig: filter_all!(f_self())
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)?,
// TODO: Should we limit this?
attrs: None,
})
}
@ -857,13 +885,13 @@ pub enum AuthEventStep {
impl AuthEventStep {
fn from_authstep(aus: AuthStep, sid: Option<Uuid>) -> Result<Self, OperationError> {
match aus {
AuthStep::Init(name, appid) => {
AuthStep::Init(name) => {
if sid.is_some() {
Err(OperationError::InvalidAuthState(
"session id present in init".to_string(),
))
} else {
Ok(AuthEventStep::Init(AuthEventStepInit { name, appid }))
Ok(AuthEventStep::Init(AuthEventStepInit { name, appid: None }))
}
}
AuthStep::Creds(creds) => match sid {
@ -1066,7 +1094,7 @@ impl ReviveRecycledEvent {
filter: filter
.into_recycled()
.validate(qs.get_schema())
.map_err(|e| OperationError::SchemaViolation(e))?,
.map_err(OperationError::SchemaViolation)?,
})
}

View file

@ -421,7 +421,7 @@ impl Filter<FilterInvalid> {
f: &ProtoFilter,
qs: &mut QueryServerReadTransaction,
) -> Result<Self, OperationError> {
lperf_segment!(audit, "filter::from_ro", || {
lperf_trace_segment!(audit, "filter::from_ro", || {
Ok(Filter {
state: FilterInvalid {
inner: FilterComp::from_ro(audit, f, qs)?,
@ -435,7 +435,7 @@ impl Filter<FilterInvalid> {
f: &ProtoFilter,
qs: &mut QueryServerWriteTransaction,
) -> Result<Self, OperationError> {
lperf_segment!(audit, "filter::from_rw", || {
lperf_trace_segment!(audit, "filter::from_rw", || {
Ok(Filter {
state: FilterInvalid {
inner: FilterComp::from_rw(audit, f, qs)?,
@ -449,7 +449,7 @@ impl Filter<FilterInvalid> {
f: &LdapFilter,
qs: &mut QueryServerReadTransaction,
) -> Result<Self, OperationError> {
lperf_segment!(audit, "filter::from_ldap_ro", || {
lperf_trace_segment!(audit, "filter::from_ldap_ro", || {
Ok(Filter {
state: FilterInvalid {
inner: FilterComp::from_ldap_ro(audit, f, qs)?,
@ -629,11 +629,20 @@ impl FilterComp {
qs: &mut QueryServerReadTransaction,
) -> Result<Self, OperationError> {
Ok(match f {
ProtoFilter::Eq(a, v) => FilterComp::Eq(a.clone(), qs.clone_partialvalue(audit, a, v)?),
ProtoFilter::Sub(a, v) => {
FilterComp::Sub(a.clone(), qs.clone_partialvalue(audit, a, v)?)
ProtoFilter::Eq(a, v) => {
let nk = qs.get_schema().normalise_attr_name(a);
let v = qs.clone_partialvalue(audit, nk.as_str(), v)?;
FilterComp::Eq(nk, v)
}
ProtoFilter::Sub(a, v) => {
let nk = qs.get_schema().normalise_attr_name(a);
let v = qs.clone_partialvalue(audit, nk.as_str(), v)?;
FilterComp::Sub(nk, v)
}
ProtoFilter::Pres(a) => {
let nk = qs.get_schema().normalise_attr_name(a);
FilterComp::Pres(nk)
}
ProtoFilter::Pres(a) => FilterComp::Pres(a.clone()),
ProtoFilter::Or(l) => FilterComp::Or(
l.iter()
.map(|f| Self::from_ro(audit, f, qs))
@ -655,11 +664,20 @@ impl FilterComp {
qs: &mut QueryServerWriteTransaction,
) -> Result<Self, OperationError> {
Ok(match f {
ProtoFilter::Eq(a, v) => FilterComp::Eq(a.clone(), qs.clone_partialvalue(audit, a, v)?),
ProtoFilter::Sub(a, v) => {
FilterComp::Sub(a.clone(), qs.clone_partialvalue(audit, a, v)?)
ProtoFilter::Eq(a, v) => {
let nk = qs.get_schema().normalise_attr_name(a);
let v = qs.clone_partialvalue(audit, nk.as_str(), v)?;
FilterComp::Eq(nk, v)
}
ProtoFilter::Sub(a, v) => {
let nk = qs.get_schema().normalise_attr_name(a);
let v = qs.clone_partialvalue(audit, nk.as_str(), v)?;
FilterComp::Sub(nk, v)
}
ProtoFilter::Pres(a) => {
let nk = qs.get_schema().normalise_attr_name(a);
FilterComp::Pres(nk)
}
ProtoFilter::Pres(a) => FilterComp::Pres(a.clone()),
ProtoFilter::Or(l) => FilterComp::Or(
l.iter()
.map(|f| Self::from_rw(audit, f, qs))
@ -1286,10 +1304,6 @@ mod tests {
let e: Entry<EntrySealed, EntryNew> = unsafe {
Entry::unsafe_from_entry_str(
r#"{
"valid": {
"uuid": "db237e8a-0079-4b8c-8a56-593b22aa44d1"
},
"state": null,
"attrs": {
"userid": ["william"],
"uuid": ["db237e8a-0079-4b8c-8a56-593b22aa44d1"],
@ -1338,10 +1352,6 @@ mod tests {
let e: Entry<EntrySealed, EntryNew> = unsafe {
Entry::unsafe_from_entry_str(
r#"{
"valid": {
"uuid": "db237e8a-0079-4b8c-8a56-593b22aa44d1"
},
"state": null,
"attrs": {
"userid": ["william"],
"uuid": ["db237e8a-0079-4b8c-8a56-593b22aa44d1"],
@ -1390,10 +1400,6 @@ mod tests {
let e1: Entry<EntrySealed, EntryNew> = unsafe {
Entry::unsafe_from_entry_str(
r#"{
"valid": {
"uuid": "db237e8a-0079-4b8c-8a56-593b22aa44d1"
},
"state": null,
"attrs": {
"userid": ["william"],
"uuid": ["db237e8a-0079-4b8c-8a56-593b22aa44d1"],
@ -1423,10 +1429,6 @@ mod tests {
let e1: Entry<EntrySealed, EntryNew> = unsafe {
Entry::unsafe_from_entry_str(
r#"{
"valid": {
"uuid": "db237e8a-0079-4b8c-8a56-593b22aa44d1"
},
"state": null,
"attrs": {
"class": ["person"],
"uuid": ["db237e8a-0079-4b8c-8a56-593b22aa44d1"],
@ -1440,10 +1442,6 @@ mod tests {
let e2: Entry<EntrySealed, EntryNew> = unsafe {
Entry::unsafe_from_entry_str(
r#"{
"valid": {
"uuid": "4b6228ab-1dbe-42a4-a9f5-f6368222438e"
},
"state": null,
"attrs": {
"class": ["person"],
"uuid": ["4b6228ab-1dbe-42a4-a9f5-f6368222438e"],
@ -1457,10 +1455,6 @@ mod tests {
let e3: Entry<EntrySealed, EntryNew> = unsafe {
Entry::unsafe_from_entry_str(
r#"{
"valid": {
"uuid": "7b23c99d-c06b-4a9a-a958-3afa56383e1d"
},
"state": null,
"attrs": {
"class": ["person"],
"uuid": ["7b23c99d-c06b-4a9a-a958-3afa56383e1d"],
@ -1474,10 +1468,6 @@ mod tests {
let e4: Entry<EntrySealed, EntryNew> = unsafe {
Entry::unsafe_from_entry_str(
r#"{
"valid": {
"uuid": "21d816b5-1f6a-4696-b7c1-6ed06d22ed81"
},
"state": null,
"attrs": {
"class": ["group"],
"uuid": ["21d816b5-1f6a-4696-b7c1-6ed06d22ed81"],

View file

@ -87,7 +87,7 @@ pub(crate) struct Account {
// app_creds: Vec<Credential>
// account expiry? (as opposed to cred expiry)
pub spn: String,
// TODO: When you add mail, you should update the check to zxcvbn
// TODO #256: When you add mail, you should update the check to zxcvbn
// to include these.
// pub mail: Vec<String>
}
@ -98,7 +98,7 @@ impl Account {
value: Entry<EntrySealed, EntryCommitted>,
qs: &mut QueryServerReadTransaction,
) -> Result<Self, OperationError> {
lperf_segment!(au, "idm::account::try_from_entry_ro", || {
lperf_trace_segment!(au, "idm::account::try_from_entry_ro", || {
let groups = Group::try_from_account_entry_ro(au, &value, qs)?;
try_from_entry!(value, groups)
})
@ -133,7 +133,7 @@ impl Account {
spn: self.spn.clone(),
displayname: self.name.clone(),
uuid: self.uuid.to_hyphenated_ref().to_string(),
application: None,
// application: None,
groups: self.groups.iter().map(|g| g.to_proto()).collect(),
claims: claims.iter().map(|c| c.to_proto()).collect(),
})
@ -154,7 +154,7 @@ impl Account {
match appid {
Some(_) => Err(OperationError::InvalidState),
None => {
// TODO: Enforce PW policy. Can we allow this change?
// TODO #59: Enforce PW policy. Can we allow this change?
match &self.primary {
// Change the cred
Some(primary) => {

View file

@ -288,7 +288,7 @@ pub(crate) struct AuthSession {
}
impl AuthSession {
pub fn new(account: Account, appid: Option<String>) -> Self {
pub fn new(au: &mut AuditScope, account: Account, appid: Option<String>) -> Self {
// During this setup, determine the credential handler that we'll be using
// for this session. This is currently based on presentation of an application
// id.
@ -304,9 +304,14 @@ impl AuthSession {
// Now we see if they have one ...
match &account.primary {
Some(cred) => {
// TODO: Log this corruption better ... :(
// Probably means new authsession has to be failable
CredHandler::try_from(cred).unwrap_or_else(|_| CredHandler::Denied)
CredHandler::try_from(cred).unwrap_or_else(|_| {
lsecurity_critical!(
au,
"corrupt credentials, unable to start credhandler"
);
CredHandler::Denied
})
}
None => CredHandler::Denied,
}
@ -409,9 +414,14 @@ mod tests {
#[test]
fn test_idm_authsession_anonymous_auth_mech() {
let mut audit = AuditScope::new(
"test_idm_authsession_anonymous_auth_mech",
uuid::Uuid::new_v4(),
None,
);
let anon_account = entry_str_to_account!(JSON_ANONYMOUS_V1);
let session = AuthSession::new(anon_account, None);
let session = AuthSession::new(&mut audit, anon_account, None);
let auth_mechs = session.valid_auth_mechs();
@ -425,10 +435,13 @@ mod tests {
#[test]
fn test_idm_authsession_floodcheck_mech() {
let mut audit =
AuditScope::new("test_idm_authsession_floodcheck_mech", uuid::Uuid::new_v4());
let mut audit = AuditScope::new(
"test_idm_authsession_floodcheck_mech",
uuid::Uuid::new_v4(),
None,
);
let anon_account = entry_str_to_account!(JSON_ANONYMOUS_V1);
let mut session = AuthSession::new(anon_account, None);
let mut session = AuthSession::new(&mut audit, anon_account, None);
let attempt = vec![
AuthCredential::Anonymous,
@ -449,8 +462,17 @@ mod tests {
#[test]
fn test_idm_authsession_missing_appid() {
let anon_account = entry_str_to_account!(JSON_ANONYMOUS_V1);
let mut audit = AuditScope::new(
"test_idm_authsession_missing_appid",
uuid::Uuid::new_v4(),
None,
);
let session = AuthSession::new(anon_account, Some("NonExistantAppID".to_string()));
let session = AuthSession::new(
&mut audit,
anon_account,
Some("NonExistantAppID".to_string()),
);
let auth_mechs = session.valid_auth_mechs();
@ -463,6 +485,7 @@ mod tests {
let mut audit = AuditScope::new(
"test_idm_authsession_simple_password_mech",
uuid::Uuid::new_v4(),
None,
);
// create the ent
let mut account = entry_str_to_account!(JSON_ADMIN_V1);
@ -471,7 +494,7 @@ mod tests {
account.primary = Some(cred);
// now check
let mut session = AuthSession::new(account.clone(), None);
let mut session = AuthSession::new(&mut audit, account.clone(), None);
let auth_mechs = session.valid_auth_mechs();
assert!(
@ -487,7 +510,7 @@ mod tests {
_ => panic!(),
};
let mut session = AuthSession::new(account, None);
let mut session = AuthSession::new(&mut audit, account, None);
let attempt = vec![AuthCredential::Password("test_password".to_string())];
match session.validate_creds(&mut audit, &attempt, &Duration::from_secs(0)) {
Ok(AuthState::Success(_)) => {}
@ -502,6 +525,7 @@ mod tests {
let mut audit = AuditScope::new(
"test_idm_authsession_totp_password_mech",
uuid::Uuid::new_v4(),
None,
);
// create the ent
let mut account = entry_str_to_account!(JSON_ADMIN_V1);
@ -528,7 +552,7 @@ mod tests {
account.primary = Some(cred);
// now check
let session = AuthSession::new(account.clone(), None);
let session = AuthSession::new(&mut audit, account.clone(), None);
let auth_mechs = session.valid_auth_mechs();
assert!(auth_mechs.iter().fold(true, |acc, x| match x {
AuthAllowed::Password => acc,
@ -540,7 +564,7 @@ mod tests {
// check send anon (fail)
{
let mut session = AuthSession::new(account.clone(), None);
let mut session = AuthSession::new(&mut audit, account.clone(), None);
match session.validate_creds(&mut audit, &vec![AuthCredential::Anonymous], &ts) {
Ok(AuthState::Denied(msg)) => assert!(msg == BAD_AUTH_TYPE_MSG),
_ => panic!(),
@ -552,7 +576,7 @@ mod tests {
// check send bad pw, should get continue (even though denied set)
// then send good totp, should fail.
{
let mut session = AuthSession::new(account.clone(), None);
let mut session = AuthSession::new(&mut audit, account.clone(), None);
match session.validate_creds(
&mut audit,
&vec![AuthCredential::Password(pw_bad.to_string())],
@ -569,7 +593,7 @@ mod tests {
// check send bad pw, should get continue (even though denied set)
// then send bad totp, should fail TOTP
{
let mut session = AuthSession::new(account.clone(), None);
let mut session = AuthSession::new(&mut audit, account.clone(), None);
match session.validate_creds(
&mut audit,
&vec![AuthCredential::Password(pw_bad.to_string())],
@ -587,7 +611,7 @@ mod tests {
// check send good pw, should get continue
// then send good totp, success
{
let mut session = AuthSession::new(account.clone(), None);
let mut session = AuthSession::new(&mut audit, account.clone(), None);
match session.validate_creds(
&mut audit,
&vec![AuthCredential::Password(pw_good.to_string())],
@ -605,7 +629,7 @@ mod tests {
// check send good pw, should get continue
// then send bad totp, fail otp
{
let mut session = AuthSession::new(account.clone(), None);
let mut session = AuthSession::new(&mut audit, account.clone(), None);
match session.validate_creds(
&mut audit,
&vec![AuthCredential::Password(pw_good.to_string())],
@ -622,7 +646,7 @@ mod tests {
// check send bad totp, should fail immediate
{
let mut session = AuthSession::new(account.clone(), None);
let mut session = AuthSession::new(&mut audit, account.clone(), None);
match session.validate_creds(&mut audit, &vec![AuthCredential::TOTP(totp_bad)], &ts) {
Ok(AuthState::Denied(msg)) => assert!(msg == BAD_TOTP_MSG),
_ => panic!(),
@ -632,7 +656,7 @@ mod tests {
// check send good totp, should continue
// then bad pw, fail pw
{
let mut session = AuthSession::new(account.clone(), None);
let mut session = AuthSession::new(&mut audit, account.clone(), None);
match session.validate_creds(&mut audit, &vec![AuthCredential::TOTP(totp_good)], &ts) {
Ok(AuthState::Continue(cont)) => assert!(cont == vec![AuthAllowed::Password]),
_ => panic!(),
@ -650,7 +674,7 @@ mod tests {
// check send good totp, should continue
// then good pw, success
{
let mut session = AuthSession::new(account.clone(), None);
let mut session = AuthSession::new(&mut audit, account.clone(), None);
match session.validate_creds(&mut audit, &vec![AuthCredential::TOTP(totp_good)], &ts) {
Ok(AuthState::Continue(cont)) => assert!(cont == vec![AuthAllowed::Password]),
_ => panic!(),
@ -669,7 +693,7 @@ mod tests {
// check bad totp, bad pw, fail totp.
{
let mut session = AuthSession::new(account.clone(), None);
let mut session = AuthSession::new(&mut audit, account.clone(), None);
match session.validate_creds(
&mut audit,
&vec![
@ -684,7 +708,7 @@ mod tests {
}
// check send bad pw, good totp fail password
{
let mut session = AuthSession::new(account.clone(), None);
let mut session = AuthSession::new(&mut audit, account.clone(), None);
match session.validate_creds(
&mut audit,
&vec![
@ -699,7 +723,7 @@ mod tests {
}
// check send good pw, bad totp fail totp.
{
let mut session = AuthSession::new(account.clone(), None);
let mut session = AuthSession::new(&mut audit, account.clone(), None);
match session.validate_creds(
&mut audit,
&vec![
@ -714,7 +738,7 @@ mod tests {
}
// check good pw, good totp, success
{
let mut session = AuthSession::new(account.clone(), None);
let mut session = AuthSession::new(&mut audit, account.clone(), None);
match session.validate_creds(
&mut audit,
&vec![

View file

@ -32,7 +32,7 @@ impl MfaRegNext {
match self {
MfaRegNext::Success => SetCredentialResponse::Success,
MfaRegNext::TOTPCheck(secret) => {
SetCredentialResponse::TOTPCheck(u.clone(), (*secret).clone())
SetCredentialResponse::TOTPCheck(*u, (*secret).clone())
}
}
}

View file

@ -173,7 +173,7 @@ impl<'a> IdmServerWriteTransaction<'a> {
// continue, and helps to keep non-needed entry specific data
// out of the LRU.
let account = Account::try_from_entry_ro(au, entry, &mut self.qs_read)?;
let auth_session = AuthSession::new(account, init.appid.clone());
let auth_session = AuthSession::new(au, account, init.appid.clone());
// Get the set of mechanisms that can proceed. This is tied
// to the session so that it can mutate state and have progression
@ -202,13 +202,14 @@ impl<'a> IdmServerWriteTransaction<'a> {
AuthEventStep::Creds(creds) => {
lperf_segment!(au, "idm::server::auth<Creds>", || {
// Do we have a session?
let auth_session = try_audit!(
au,
self.sessions
// Why is the session missing?
.get_mut(&creds.sessionid)
.ok_or(OperationError::InvalidSessionState)
);
let auth_session = self
.sessions
// Why is the session missing?
.get_mut(&creds.sessionid)
.ok_or_else(|| {
ladmin_error!(au, "Invalid Session State (no present session uuid)");
OperationError::InvalidSessionState
})?;
// Process the credentials here as required.
// Basically throw them at the auth_session and see what
// falls out.
@ -235,13 +236,16 @@ impl<'a> IdmServerWriteTransaction<'a> {
// TODO #59: Implement soft lock checking for unix creds here!
// Get the entry/target we are working on.
let account_entry = try_audit!(au, self.qs_read.internal_search_uuid(au, &uae.target));
// Get their account
let account = try_audit!(
au,
UnixUserAccount::try_from_entry_ro(au, account_entry, &mut self.qs_read)
);
let account = self
.qs_read
.internal_search_uuid(au, &uae.target)
.and_then(|account_entry| {
UnixUserAccount::try_from_entry_ro(au, account_entry, &mut self.qs_read)
})
.map_err(|e| {
ladmin_error!(au, "Failed to start auth unix -> {:?}", e);
e
})?;
// Validate the unix_pw - this checks the account/cred lock states.
account.verify_unix_credential(au, uae.cleartext.as_str())
@ -255,7 +259,13 @@ impl<'a> IdmServerWriteTransaction<'a> {
) -> Result<Option<LdapBoundToken>, OperationError> {
// TODO #59: Implement soft lock checking for unix creds here!
let account_entry = try_audit!(au, self.qs_read.internal_search_uuid(au, &lae.target));
let account_entry = self
.qs_read
.internal_search_uuid(au, &lae.target)
.map_err(|e| {
ladmin_error!(au, "Failed to start auth ldap -> {:?}", e);
e
})?;
/* !!! This would probably be better if we DIDN'T use the Unix/Account types ... ? */
// if anonymous
@ -263,9 +273,9 @@ impl<'a> IdmServerWriteTransaction<'a> {
// TODO: #59 We should have checked if anonymous was locked by now!
let account = Account::try_from_entry_ro(au, account_entry, &mut self.qs_read)?;
Ok(Some(LdapBoundToken {
spn: account.spn.clone(),
uuid: UUID_ANONYMOUS.clone(),
effective_uuid: UUID_ANONYMOUS.clone(),
spn: account.spn,
uuid: *UUID_ANONYMOUS,
effective_uuid: *UUID_ANONYMOUS,
}))
} else {
let account = UnixUserAccount::try_from_entry_ro(au, account_entry, &mut self.qs_read)?;
@ -274,9 +284,9 @@ impl<'a> IdmServerWriteTransaction<'a> {
.is_some()
{
Ok(Some(LdapBoundToken {
spn: account.spn.clone(),
uuid: account.uuid.clone(),
effective_uuid: UUID_ANONYMOUS.clone(),
spn: account.spn,
uuid: account.uuid,
effective_uuid: *UUID_ANONYMOUS,
}))
} else {
Ok(None)
@ -285,7 +295,7 @@ impl<'a> IdmServerWriteTransaction<'a> {
}
pub fn commit(self, au: &mut AuditScope) -> Result<(), OperationError> {
lperf_segment!(au, "idm::server::IdmServerWriteTransaction::commit", || {
lperf_trace_segment!(au, "idm::server::IdmServerWriteTransaction::commit", || {
self.sessions.commit();
Ok(())
})
@ -298,16 +308,16 @@ impl<'a> IdmServerProxyReadTransaction<'a> {
au: &mut AuditScope,
rate: &RadiusAuthTokenEvent,
) -> Result<RadiusAuthToken, OperationError> {
// TODO: This needs to be an impersonate search!
let account_entry = try_audit!(
au,
self.qs_read
.impersonate_search_ext_uuid(au, &rate.target, &rate.event)
);
let account = try_audit!(
au,
RadiusAccount::try_from_entry_reduced(au, account_entry, &mut self.qs_read)
);
let account = self
.qs_read
.impersonate_search_ext_uuid(au, &rate.target, &rate.event)
.and_then(|account_entry| {
RadiusAccount::try_from_entry_reduced(au, account_entry, &mut self.qs_read)
})
.map_err(|e| {
ladmin_error!(au, "Failed to start radius auth token {:?}", e);
e
})?;
account.to_radiusauthtoken()
}
@ -317,16 +327,17 @@ impl<'a> IdmServerProxyReadTransaction<'a> {
au: &mut AuditScope,
uute: &UnixUserTokenEvent,
) -> Result<UnixUserToken, OperationError> {
let account_entry = try_audit!(
au,
self.qs_read
.impersonate_search_ext_uuid(au, &uute.target, &uute.event)
);
let account = self
.qs_read
.impersonate_search_ext_uuid(au, &uute.target, &uute.event)
.and_then(|account_entry| {
UnixUserAccount::try_from_entry_reduced(au, account_entry, &mut self.qs_read)
})
.map_err(|e| {
ladmin_error!(au, "Failed to start unix user token -> {:?}", e);
e
})?;
let account = try_audit!(
au,
UnixUserAccount::try_from_entry_reduced(au, account_entry, &mut self.qs_read)
);
account.to_unixusertoken()
}
@ -335,13 +346,14 @@ impl<'a> IdmServerProxyReadTransaction<'a> {
au: &mut AuditScope,
uute: &UnixGroupTokenEvent,
) -> Result<UnixGroupToken, OperationError> {
let account_entry = try_audit!(
au,
self.qs_read
.impersonate_search_ext_uuid(au, &uute.target, &uute.event)
);
let group = try_audit!(au, UnixGroup::try_from_entry_reduced(account_entry));
let group = self
.qs_read
.impersonate_search_ext_uuid(au, &uute.target, &uute.event)
.and_then(UnixGroup::try_from_entry_reduced)
.map_err(|e| {
ladmin_error!(au, "Failed to start unix group token {:?}", e);
e
})?;
group.to_unixgrouptoken()
}
}
@ -373,10 +385,10 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
// does the password pass zxcvbn?
let entropy = try_audit!(
au,
zxcvbn::zxcvbn(cleartext, related_inputs).map_err(|_| OperationError::PasswordEmpty)
);
let entropy = zxcvbn::zxcvbn(cleartext, related_inputs).map_err(|e| {
ladmin_error!(au, "zxcvbn check failure (password empty?) {:?}", e);
OperationError::PasswordEmpty
})?;
// check account pwpolicy (for 3 or 4)? Do we need pw strength beyond this
// or should we be enforcing mfa instead
@ -402,16 +414,19 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
// check a password badlist to eliminate more content
// we check the password as "lower case" to help eliminate possibilities
let lc_password = PartialValue::new_iutf8s(cleartext);
let badlist_entry = try_audit!(
au,
self.qs_write.internal_search_uuid(au, &UUID_SYSTEM_CONFIG)
);
let badlist_entry = self
.qs_write
.internal_search_uuid(au, &UUID_SYSTEM_CONFIG)
.map_err(|e| {
ladmin_error!(au, "Failed to retrieve system configuration {:?}", e);
e
})?;
if badlist_entry.attribute_value_pres("badlist_password", &lc_password) {
lsecurity!(au, "Password found in badlist, rejecting");
return Err(OperationError::PasswordBadListed);
Err(OperationError::PasswordBadListed)
} else {
Ok(())
}
Ok(())
}
fn target_to_account(
@ -420,11 +435,16 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
target: &Uuid,
) -> Result<Account, OperationError> {
// Get the account
let account_entry = try_audit!(au, self.qs_write.internal_search_uuid(au, target));
let account = try_audit!(
au,
Account::try_from_entry_rw(au, account_entry, &mut self.qs_write)
);
let account = self
.qs_write
.internal_search_uuid(au, target)
.and_then(|account_entry| {
Account::try_from_entry_rw(au, account_entry, &mut self.qs_write)
})
.map_err(|e| {
ladmin_error!(au, "Failed to search account {:?}", e);
e
})?;
// Ask if tis all good - this step checks pwpolicy and such
// Deny the change if the account is anonymous!
@ -462,10 +482,12 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
})?;
// it returns a modify
let modlist = try_audit!(
au,
account.gen_password_mod(pce.cleartext.as_str(), &pce.appid)
);
let modlist = account
.gen_password_mod(pce.cleartext.as_str(), &pce.appid)
.map_err(|e| {
ladmin_error!(au, "Failed to generate password mod {:?}", e);
e
})?;
ltrace!(au, "processing change {:?}", modlist);
// given the new credential generate a modify
// We use impersonate here to get the event from ae
@ -493,12 +515,17 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
pce: &UnixPasswordChangeEvent,
) -> Result<(), OperationError> {
// Get the account
let account_entry = try_audit!(au, self.qs_write.internal_search_uuid(au, &pce.target));
// Assert the account is unix and valid.
let account = try_audit!(
au,
UnixUserAccount::try_from_entry_rw(au, account_entry, &mut self.qs_write)
);
let account = self
.qs_write
.internal_search_uuid(au, &pce.target)
.and_then(|account_entry| {
// Assert the account is unix and valid.
UnixUserAccount::try_from_entry_rw(au, account_entry, &mut self.qs_write)
})
.map_err(|e| {
ladmin_error!(au, "Failed to start set unix account password {:?}", e);
e
})?;
// Ask if tis all good - this step checks pwpolicy and such
// Deny the change if the account is anonymous!
@ -513,13 +540,19 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
account.spn.as_str(),
];
try_audit!(
au,
self.check_password_quality(au, pce.cleartext.as_str(), related_inputs.as_slice())
);
self.check_password_quality(au, pce.cleartext.as_str(), related_inputs.as_slice())
.map_err(|e| {
ladmin_error!(au, "Failed to checked password quality {:?}", e);
e
})?;
// it returns a modify
let modlist = try_audit!(au, account.gen_password_mod(pce.cleartext.as_str()));
let modlist = account
.gen_password_mod(pce.cleartext.as_str())
.map_err(|e| {
ladmin_error!(au, "Unable to generate password change modlist {:?}", e);
e
})?;
ltrace!(au, "processing change {:?}", modlist);
// given the new credential generate a modify
// We use impersonate here to get the event from ae
@ -536,9 +569,8 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
.map_err(|e| {
lrequest_error!(au, "error -> {:?}", e);
e
})?;
Ok(())
})
.map(|_| ())
}
pub fn recover_account(
@ -548,7 +580,10 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
cleartext: String,
) -> Result<(), OperationError> {
// name to uuid
let target = try_audit!(au, self.qs_write.name_to_uuid(au, name.as_str()));
let target = self.qs_write.name_to_uuid(au, name.as_str()).map_err(|e| {
ladmin_error!(au, "name to uuid failed {:?}", e);
e
})?;
// internal pce.
let pce = PasswordChangeEvent::new_internal(&target, cleartext.as_str(), None);
// now set_account_password.
@ -571,13 +606,18 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
// reuse something that has been disclosed.
// it returns a modify
let modlist = try_audit!(au, account.gen_password_mod(cleartext.as_str(), &gpe.appid));
let modlist = account
.gen_password_mod(cleartext.as_str(), &gpe.appid)
.map_err(|e| {
ladmin_error!(au, "Unable to generate password mod {:?}", e);
e
})?;
ltrace!(au, "processing change {:?}", modlist);
// given the new credential generate a modify
// We use impersonate here to get the event from ae
try_audit!(
au,
self.qs_write.impersonate_modify(
self.qs_write
.impersonate_modify(
au,
// Filter as executed
filter!(f_eq("uuid", PartialValue::new_uuidr(&gpe.target))),
@ -587,9 +627,11 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
// Provide the event to impersonate
&gpe.event,
)
);
Ok(cleartext)
.map(|_| cleartext)
.map_err(|e| {
ladmin_error!(au, "Failed to generate account password {:?}", e);
e
})
}
pub fn regenerate_radius_secret(
@ -604,7 +646,12 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
let cleartext = readable_password_from_random();
// Create a modlist from the change.
let modlist = try_audit!(au, account.regenerate_radius_secret_mod(cleartext.as_str()));
let modlist = account
.regenerate_radius_secret_mod(cleartext.as_str())
.map_err(|e| {
ladmin_error!(au, "Unable to generate radius secret mod {:?}", e);
e
})?;
ltrace!(au, "processing change {:?}", modlist);
// Apply it.
@ -622,9 +669,8 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
.map_err(|e| {
lrequest_error!(au, "error -> {:?}", e);
e
})?;
Ok(cleartext)
})
.map(|_| cleartext)
}
pub fn generate_account_totp(
@ -638,10 +684,11 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
let origin = (&gte.event.origin).into();
let label = gte.label.clone();
let (session, next) = try_audit!(
au,
MfaRegSession::new(origin, account, MfaReqInit::TOTP(label))
);
let (session, next) = MfaRegSession::new(origin, account, MfaReqInit::TOTP(label))
.map_err(|e| {
ladmin_error!(au, "Unable to start totp MfaRegSession {:?}", e);
e
})?;
let next = next.to_proto(&sessionid);
@ -663,44 +710,44 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
ltrace!(au, "Attempting to find mfareg_session -> {:?}", sessionid);
let (next, opt_cred) = {
// bound the life time of the session get_mut
let session = try_audit!(
au,
self.mfareg_sessions
.get_mut(&sessionid)
.ok_or(OperationError::InvalidRequestState)
);
try_audit!(
au,
let (next, opt_cred) = self
.mfareg_sessions
.get_mut(&sessionid)
.ok_or(OperationError::InvalidRequestState)
.and_then(|session| {
session.step(&origin, &vte.target, MfaReqStep::TOTPVerify(chal), &ct)
)
};
})
.map_err(|e| {
ladmin_error!(au, "Failed to verify totp {:?}", e);
e
})?;
match (&next, opt_cred) {
(MfaRegNext::Success, Some(MfaRegCred::TOTP(token))) => {
// Purge the session.
let session = self
.mfareg_sessions
.remove(&sessionid)
.expect("Session within transaction vanished!");
// reg the token
let modlist = try_audit!(au, session.account.gen_totp_mod(token));
// Perform the mod
try_audit!(
if let (MfaRegNext::Success, Some(MfaRegCred::TOTP(token))) = (&next, opt_cred) {
// Purge the session.
let session = self
.mfareg_sessions
.remove(&sessionid)
.expect("Session within transaction vanished!");
// reg the token
let modlist = session.account.gen_totp_mod(token).map_err(|e| {
ladmin_error!(au, "Failed to gen totp mod {:?}", e);
e
})?;
// Perform the mod
self.qs_write
.impersonate_modify(
au,
self.qs_write.impersonate_modify(
au,
// Filter as executed
filter!(f_eq("uuid", PartialValue::new_uuidr(&session.account.uuid))),
// Filter as intended (acp)
filter_all!(f_eq("uuid", PartialValue::new_uuidr(&session.account.uuid))),
modlist,
&vte.event,
)
);
}
_ => {}
// Filter as executed
filter!(f_eq("uuid", PartialValue::new_uuidr(&session.account.uuid))),
// Filter as intended (acp)
filter_all!(f_eq("uuid", PartialValue::new_uuidr(&session.account.uuid))),
modlist,
&vte.event,
)
.map_err(|e| {
ladmin_error!(au, "verify_account_totp {:?}", e);
e
})?;
};
let next = next.to_proto(&sessionid);
@ -708,7 +755,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
}
pub fn commit(self, au: &mut AuditScope) -> Result<(), OperationError> {
lperf_segment!(au, "idm::server::IdmServerWriteTransaction::commit", || {
lperf_trace_segment!(au, "idm::server::IdmServerWriteTransaction::commit", || {
self.mfareg_sessions.commit();
self.qs_write.commit(au)
})

View file

@ -14,6 +14,9 @@ use uuid::Uuid;
use regex::Regex;
// Clippy doesn't like Bind here. But proto needs unboxed ldapmsg,
// and ldapboundtoken is moved. Really, it's not too bad, every message here is pretty sucky.
#[allow(clippy::large_enum_variant)]
pub enum LdapResponseState {
Unbind,
Disconnect(LdapMsg),
@ -168,7 +171,7 @@ impl LdapServer {
};
// TODO #67: limit the number of attributes here!
let attrs = if sr.attrs.len() == 0 {
let attrs = if sr.attrs.is_empty() {
// If [], then "all" attrs
None
} else {
@ -242,7 +245,7 @@ impl LdapServer {
// Build the event, with the permissions from effective_uuid
// (should always be anonymous at the moment)
// ! Remember, searchEvent wraps to ignore hidden for us.
let se = lperf_segment!(au, "ldap::do_search<core><prepare_se>", || {
let se = lperf_trace_segment!(au, "ldap::do_search<core><prepare_se>", || {
SearchEvent::new_ext_impersonate_uuid(
au,
&mut idm_read.qs_read,
@ -258,18 +261,19 @@ impl LdapServer {
})?;
// These have already been fully reduced, so we can just slap it into the result.
let lres = lperf_segment!(au, "ldap::do_search<core><prepare results>", || {
let lres: Result<Vec<_>, _> = res
.into_iter()
.map(|e| {
e.to_ldap(au, &mut idm_read.qs_read, self.basedn.as_str())
// if okay, wrap in a ldap msg.
.map(|r| sr.gen_result_entry(r))
})
.chain(iter::once(Ok(sr.gen_success())))
.collect();
lres
});
let lres =
lperf_trace_segment!(au, "ldap::do_search<core><prepare results>", || {
let lres: Result<Vec<_>, _> = res
.into_iter()
.map(|e| {
e.to_ldap(au, &mut idm_read.qs_read, self.basedn.as_str())
// if okay, wrap in a ldap msg.
.map(|r| sr.gen_result_entry(r))
})
.chain(iter::once(Ok(sr.gen_success())))
.collect();
lres
});
let lres = lres.map_err(|e| {
ladmin_error!(au, "entry resolve failure {:?}", e);
@ -304,7 +308,7 @@ impl LdapServer {
let target_uuid: Uuid = if dn == "" {
if pw == "" {
lsecurity!(au, "✅ LDAP Bind success anonymous");
UUID_ANONYMOUS.clone()
*UUID_ANONYMOUS
} else {
lsecurity!(au, "❌ LDAP Bind failure anonymous");
// Yeah-nahhhhh

View file

@ -17,7 +17,7 @@ mod utils;
#[macro_use]
mod async_log;
#[macro_use]
mod audit;
pub mod audit;
pub mod be;
pub mod constants;
pub mod credential;

View file

@ -15,7 +15,7 @@ macro_rules! run_test_no_init {
.is_test(true)
.try_init();
let mut audit = AuditScope::new("run_test", uuid::Uuid::new_v4());
let mut audit = AuditScope::new("run_test", uuid::Uuid::new_v4(), None);
let be = match Backend::new(&mut audit, "", 1) {
Ok(be) => be,
@ -55,7 +55,7 @@ macro_rules! run_test {
.is_test(true)
.try_init();
let mut audit = AuditScope::new("run_test", uuid::Uuid::new_v4());
let mut audit = AuditScope::new("run_test", uuid::Uuid::new_v4(), None);
let be = match Backend::new(&mut audit, "", 1) {
Ok(be) => be,
@ -122,7 +122,7 @@ macro_rules! run_idm_test {
.is_test(true)
.try_init();
let mut audit = AuditScope::new("run_test", uuid::Uuid::new_v4());
let mut audit = AuditScope::new("run_test", uuid::Uuid::new_v4(), None);
let be = Backend::new(&mut audit, "", 1).expect("Failed to init be");
let schema_outer = Schema::new(&mut audit).expect("Failed to init schema");

View file

@ -16,8 +16,11 @@ pub struct ModifyValid;
pub struct ModifyInvalid;
#[derive(Debug)]
#[allow(clippy::large_enum_variant)]
pub enum Modify {
// This value *should* exist.
// Clippy doesn't like value here, as value > pv. It could be an improvement to
// box here, but not sure. ... TODO and thought needed.
Present(String, Value),
// This value *should not* exist.
Removed(String, PartialValue),

View file

@ -80,7 +80,10 @@ fn enforce_unique<STATE>(
// Build a set of all the value -> uuid for the cands.
// If already exist, reject due to dup.
let cand_attr = try_audit!(au, get_cand_attr_set(au, cand, attr));
let cand_attr = get_cand_attr_set(au, cand, attr).map_err(|e| {
ladmin_error!(au, "failed to get cand attr set {:?}", e);
e
})?;
ltrace!(au, "{:?}", cand_attr);
@ -107,16 +110,19 @@ fn enforce_unique<STATE>(
ltrace!(au, "{:?}", filt_in);
// If any results, reject.
let conflict_cand = try_audit!(au, qs.internal_exists(au, filt_in));
let conflict_cand = qs.internal_exists(au, filt_in).map_err(|e| {
ladmin_error!(au, "internal exists error {:?}", e);
e
})?;
// If all okay, okay!
if conflict_cand {
return Err(OperationError::Plugin(PluginError::AttrUnique(
Err(OperationError::Plugin(PluginError::AttrUnique(
"duplicate value detected".to_string(),
)));
)))
} else {
Ok(())
}
Ok(())
}
impl Plugin for AttrUnique {
@ -210,8 +216,6 @@ mod tests {
fn test_pre_create_name_unique() {
let e: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["person"],
"name": ["testperson"],
@ -240,8 +244,6 @@ mod tests {
fn test_pre_create_name_unique_2() {
let e: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["person"],
"name": ["testperson"],
@ -273,8 +275,6 @@ mod tests {
fn test_pre_modify_name_unique() {
let ea: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["group"],
"name": ["testgroup_a"],
@ -285,8 +285,6 @@ mod tests {
let eb: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["group"],
"name": ["testgroup_b"],
@ -320,8 +318,6 @@ mod tests {
fn test_pre_modify_name_unique_2() {
let ea: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["group"],
"name": ["testgroup_a"],
@ -332,8 +328,6 @@ mod tests {
let eb: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["group"],
"name": ["testgroup_b"],

View file

@ -1,6 +1,5 @@
use crate::plugins::Plugin;
use std::collections::BTreeSet;
// TODO: Should be able to generate all uuid's via Value.
use uuid::Uuid;
use crate::audit::AuditScope;
@ -78,14 +77,15 @@ impl Plugin for Base {
// Should this be forgiving and just generate the UUID?
// NO! If you tried to specify it, but didn't give it, then you made
// a mistake and your intent is unknown.
let v: Value = try_audit!(
au,
u.first()
.ok_or_else(|| OperationError::Plugin(PluginError::Base(
"Uuid format invalid".to_string()
)))
.map(|v| (*v).clone())
);
let v: Value = u
.first()
.ok_or_else(|| {
ladmin_error!(au, "Uuid format invalid");
OperationError::Plugin(PluginError::Base(
"Uuid format invalid".to_string(),
))
})
.map(|v| (*v).clone())?;
v
}
None => Value::new_uuid(Uuid::new_v4()),
@ -274,8 +274,6 @@ mod tests {
use kanidm_proto::v1::{OperationError, PluginError};
const JSON_ADMIN_ALLOW_ALL: &'static str = r#"{
"valid": null,
"state": null,
"attrs": {
"class": [
"object",
@ -444,8 +442,6 @@ mod tests {
let e: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["person"],
"name": ["testperson"],
@ -481,8 +477,6 @@ mod tests {
fn test_pre_create_uuid_exist() {
let e: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["person"],
"name": ["testperson"],
@ -514,8 +508,6 @@ mod tests {
let ea: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["person"],
"name": ["testperson_a"],
@ -528,8 +520,6 @@ mod tests {
let eb: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["person"],
"name": ["testperson_a"],
@ -559,8 +549,6 @@ mod tests {
// Add another uuid to a type
let ea: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["group"],
"name": ["testgroup_a"],
@ -590,8 +578,6 @@ mod tests {
// Test attempting to remove a uuid
let ea: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["group"],
"name": ["testgroup_a"],
@ -621,8 +607,6 @@ mod tests {
// Test attempting to purge uuid
let ea: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["group"],
"name": ["testgroup_a"],

View file

@ -36,7 +36,14 @@ fn apply_gidnumber<T: Clone>(
|| e.attribute_value_pres("class", &CLASS_POSIXACCOUNT))
&& !e.attribute_pres("gidnumber")
{
let u_ref = try_audit!(au, e.get_uuid().ok_or(OperationError::InvalidEntryState));
let u_ref = e
.get_uuid()
.ok_or(OperationError::InvalidEntryState)
.map_err(|e| {
ladmin_error!(au, "Invalid Entry State - Missing UUID");
e
})?;
let gid = uuid_to_gid_u32(u_ref);
// assert the value is greater than the system range.
if gid < GID_SYSTEM_NUMBER_MIN {
@ -123,8 +130,6 @@ mod tests {
fn test_gidnumber_create_generate() {
let e: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["account", "posixaccount"],
"name": ["testperson"],
@ -157,8 +162,6 @@ mod tests {
fn test_gidnumber_create_noaction() {
let e: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["account", "posixaccount"],
"name": ["testperson"],
@ -192,8 +195,6 @@ mod tests {
fn test_gidnumber_modify_generate() {
let e: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["account"],
"name": ["testperson"],
@ -259,8 +260,6 @@ mod tests {
fn test_gidnumber_modify_noregen() {
let e: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["account", "posixaccount"],
"name": ["testperson"],

View file

@ -50,7 +50,7 @@ macro_rules! run_create_test {
use crate::server::QueryServer;
use crate::utils::duration_from_epoch_now;
let mut au = AuditScope::new("run_create_test", uuid::Uuid::new_v4());
let mut au = AuditScope::new("run_create_test", uuid::Uuid::new_v4(), None);
lperf_segment!(&mut au, "plugins::macros::run_create_test", || {
let qs = setup_test!(&mut au, $preload_entries);
@ -105,7 +105,7 @@ macro_rules! run_modify_test {
use crate::server::QueryServer;
use crate::utils::duration_from_epoch_now;
let mut au = AuditScope::new("run_modify_test", uuid::Uuid::new_v4());
let mut au = AuditScope::new("run_modify_test", uuid::Uuid::new_v4(), None);
lperf_segment!(&mut au, "plugins::macros::run_modify_test", || {
let qs = setup_test!(&mut au, $preload_entries);
@ -167,7 +167,7 @@ macro_rules! run_delete_test {
use crate::server::QueryServer;
use crate::utils::duration_from_epoch_now;
let mut au = AuditScope::new("run_delete_test", uuid::Uuid::new_v4());
let mut au = AuditScope::new("run_delete_test", uuid::Uuid::new_v4(), None);
lperf_segment!(&mut au, "plugins::macros::run_delete_test", || {
let qs = setup_test!(&mut au, $preload_entries);

View file

@ -94,18 +94,20 @@ fn apply_memberof(
// Now work on the affected set.
// For each affected uuid
for a_uuid in affected_uuids {
affected_uuids.into_iter().try_for_each(|a_uuid| {
// search where group + Eq("member": "uuid")
let groups = try_audit!(
au,
qs.internal_search(
let groups = qs
.internal_search(
au,
filter!(f_and!([
f_eq("class", CLASS_GROUP.clone()),
f_eq("member", PartialValue::new_refer_r(a_uuid))
]))
])),
)
);
.map_err(|e| {
ladmin_error!(au, "internal search failure -> {:?}", e);
e
})?;
// get UUID of all groups + all memberof values
let mut dir_mo_set: Vec<Value> = groups
.iter()
@ -171,17 +173,16 @@ fn apply_memberof(
// apply to affected uuid
let modlist = ModifyList::new_list(mod_set);
try_audit!(
qs.internal_modify(
au,
qs.internal_modify(
au,
filter!(f_eq("uuid", PartialValue::new_uuid(*a_uuid))),
modlist,
)
);
}
Ok(())
filter!(f_eq("uuid", PartialValue::new_uuid(*a_uuid))),
modlist,
)
.map_err(|e| {
ladmin_error!(au, "Internal modification failure -> {:?}", e);
e
})
})
}
impl Plugin for MemberOf {
@ -402,8 +403,6 @@ mod tests {
const UUID_D: &'static str = "dddddddd-2ab3-48e3-938d-1b4754cd2984";
const EA: &'static str = r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["group", "memberof"],
"name": ["testgroup_a"],
@ -412,8 +411,6 @@ mod tests {
}"#;
const EB: &'static str = r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["group", "memberof"],
"name": ["testgroup_b"],
@ -422,8 +419,6 @@ mod tests {
}"#;
const EC: &'static str = r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["group", "memberof"],
"name": ["testgroup_c"],
@ -432,8 +427,6 @@ mod tests {
}"#;
const ED: &'static str = r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["group", "memberof"],
"name": ["testgroup_d"],

View file

@ -136,7 +136,7 @@ macro_rules! run_pre_create_transform_plugin {
$ce:ident,
$target_plugin:ty
) => {{
let r = lperf_segment!($au, <$target_plugin>::id(), || {
let r = lperf_trace_segment!($au, <$target_plugin>::id(), || {
<$target_plugin>::pre_create_transform($au, $qs, $cand, $ce)
});
r
@ -151,7 +151,7 @@ macro_rules! run_pre_create_plugin {
$ce:ident,
$target_plugin:ty
) => {{
let r = lperf_segment!(
let r = lperf_trace_segment!(
$au,
<$target_plugin>::id(),
|| <$target_plugin>::pre_create($au, $qs, $cand, $ce,)
@ -168,7 +168,7 @@ macro_rules! run_post_create_plugin {
$ce:ident,
$target_plugin:ty
) => {{
let r = lperf_segment!($au, <$target_plugin>::id(), || {
let r = lperf_trace_segment!($au, <$target_plugin>::id(), || {
<$target_plugin>::post_create($au, $qs, $cand, $ce)
});
r
@ -183,7 +183,7 @@ macro_rules! run_pre_modify_plugin {
$ce:ident,
$target_plugin:ty
) => {{
let r = lperf_segment!(
let r = lperf_trace_segment!(
$au,
<$target_plugin>::id(),
|| <$target_plugin>::pre_modify($au, $qs, $cand, $ce)
@ -201,7 +201,7 @@ macro_rules! run_post_modify_plugin {
$ce:ident,
$target_plugin:ty
) => {{
let r = lperf_segment!($au, <$target_plugin>::id(), || {
let r = lperf_trace_segment!($au, <$target_plugin>::id(), || {
<$target_plugin>::post_modify($au, $qs, $pre_cand, $cand, $ce)
});
r
@ -216,7 +216,7 @@ macro_rules! run_pre_delete_plugin {
$ce:ident,
$target_plugin:ty
) => {{
let r = lperf_segment!(
let r = lperf_trace_segment!(
$au,
<$target_plugin>::id(),
|| <$target_plugin>::pre_delete($au, $qs, $cand, $ce,)
@ -233,7 +233,7 @@ macro_rules! run_post_delete_plugin {
$ce:ident,
$target_plugin:ty
) => {{
let r = lperf_segment!($au, <$target_plugin>::id(), || {
let r = lperf_trace_segment!($au, <$target_plugin>::id(), || {
<$target_plugin>::post_delete($au, $qs, $cand, $ce)
});
r
@ -247,7 +247,7 @@ macro_rules! run_verify_plugin {
$results:expr,
$target_plugin:ty
) => {{
let mut r = lperf_segment!($au, <$target_plugin>::id(), || <$target_plugin>::verify(
let mut r = lperf_trace_segment!($au, <$target_plugin>::id(), || <$target_plugin>::verify(
$au, $qs,
));
$results.append(&mut r);

View file

@ -36,11 +36,11 @@ impl Plugin for PasswordImport {
// Until upstream btreeset supports first(), we need to convert to a vec.
let vs: Vec<_> = vs.into_iter().collect();
debug_assert!(vs.len() >= 1);
debug_assert!(!vs.is_empty());
let im_pw = vs.first()
.unwrap()
.to_str()
.ok_or(OperationError::Plugin(PluginError::PasswordImport("password_import has incorrect value type".to_string())))?;
.ok_or_else(|| OperationError::Plugin(PluginError::PasswordImport("password_import has incorrect value type".to_string())))?;
// convert the import_password to a cred
let pw = Password::try_from(im_pw)
@ -86,10 +86,12 @@ impl Plugin for PasswordImport {
// Until upstream btreeset supports first(), we need to convert to a vec.
let vs: Vec<_> = vs.into_iter().collect();
debug_assert!(vs.len() >= 1);
let im_pw = vs.first().unwrap().to_str().ok_or(OperationError::Plugin(
PluginError::PasswordImport("password_import has incorrect value type".to_string()),
))?;
debug_assert!(!vs.is_empty());
let im_pw = vs.first().unwrap().to_str().ok_or_else(|| {
OperationError::Plugin(PluginError::PasswordImport(
"password_import has incorrect value type".to_string(),
))
})?;
// convert the import_password to a cred
let pw = Password::try_from(im_pw).map_err(|_| {
@ -164,8 +166,6 @@ mod tests {
// Add another uuid to a type
let ea: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["account", "person"],
"name": ["testperson"],
@ -196,8 +196,6 @@ mod tests {
// Add another uuid to a type
let mut ea: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["account", "person"],
"name": ["testperson"],
@ -231,8 +229,6 @@ mod tests {
// Add another uuid to a type
let mut ea: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["account", "person"],
"name": ["testperson"],

View file

@ -104,12 +104,12 @@ impl Plugin for Protected {
Modify::Present(a, v) => {
// TODO: Can we avoid this clone?
if a == "class"
&& (v == &(VCLASS_SYSTEM.clone())
|| v == &(VCLASS_DOMAIN_INFO.clone())
|| v == &(VCLASS_SYSTEM_INFO.clone())
|| v == &(VCLASS_SYSTEM_CONFIG.clone())
|| v == &(VCLASS_TOMBSTONE.clone())
|| v == &(VCLASS_RECYCLED.clone()))
&& (v == &(*VCLASS_SYSTEM)
|| v == &(*VCLASS_DOMAIN_INFO)
|| v == &(*VCLASS_SYSTEM_INFO)
|| v == &(*VCLASS_SYSTEM_CONFIG)
|| v == &(*VCLASS_TOMBSTONE)
|| v == &(*VCLASS_RECYCLED))
{
Err(OperationError::SystemProtectedObject)
} else {
@ -214,8 +214,6 @@ mod tests {
use kanidm_proto::v1::OperationError;
const JSON_ADMIN_ALLOW_ALL: &'static str = r#"{
"valid": null,
"state": null,
"attrs": {
"class": [
"object",
@ -253,8 +251,6 @@ mod tests {
let e: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["person", "system"],
"name": ["testperson"],
@ -281,8 +277,6 @@ mod tests {
// Test modify of class to a system is denied
let e: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["person", "system"],
"name": ["testperson"],
@ -313,37 +307,6 @@ mod tests {
// Show that adding a system class is denied
let e: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["person"],
"name": ["testperson"],
"description": ["testperson"],
"displayname": ["testperson"]
}
}"#,
);
let preload = vec![acp, e.clone()];
run_modify_test!(
Err(OperationError::SystemProtectedObject),
preload,
filter!(f_eq("name", PartialValue::new_iname("testperson"))),
modlist!([m_pres("class", &Value::new_class("system")),]),
Some(JSON_ADMIN_V1),
|_, _| {}
);
}
#[test]
fn test_pre_modify_attr_must_may_allow() {
let acp: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(JSON_ADMIN_ALLOW_ALL);
// Show that adding a system class is denied
let e: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["object", "classtype"],
"classname": ["testclass"],
@ -374,8 +337,6 @@ mod tests {
// Test deleting with class: system is rejected.
let e: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["person", "system"],
"name": ["testperson"],

View file

@ -34,19 +34,17 @@ impl ReferentialIntegrity {
rtype: &str,
uuid_value: &Value,
) -> Result<(), OperationError> {
let uuid = try_audit!(
au,
uuid_value
.to_ref_uuid()
.ok_or_else(|| OperationError::InvalidAttribute(
"uuid could not become reference value".to_string()
))
);
let uuid = uuid_value.to_ref_uuid().ok_or_else(|| {
ladmin_error!(au, "uuid value could not convert to reference uuid");
OperationError::InvalidAttribute("uuid could not become reference value".to_string())
})?;
// NOTE: This only checks LIVE entries (not using filter_all)
let filt_in = filter!(f_eq("uuid", PartialValue::new_uuid(*uuid)));
let r = qs.internal_exists(au, filt_in);
let b = qs.internal_exists(au, filt_in).map_err(|e| {
ladmin_error!(au, "internal exists failure -> {:?}", e);
e
})?;
let b = try_audit!(au, r);
// Is the reference in the result set?
if b {
Ok(())
@ -248,8 +246,6 @@ mod tests {
fn test_create_uuid_reference_not_exist() {
let e: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["group"],
"name": ["testgroup"],
@ -277,8 +273,6 @@ mod tests {
fn test_create_uuid_reference_exist() {
let ea: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["group"],
"name": ["testgroup_a"],
@ -290,8 +284,6 @@ mod tests {
let eb: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["group"],
"name": ["testgroup_b"],
@ -328,8 +320,6 @@ mod tests {
let e: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["group"],
"name": ["testgroup"],
@ -364,8 +354,6 @@ mod tests {
fn test_modify_uuid_reference_exist() {
let ea: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["group"],
"name": ["testgroup_a"],
@ -377,8 +365,6 @@ mod tests {
let eb: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["group"],
"name": ["testgroup_b"],
@ -407,8 +393,6 @@ mod tests {
fn test_modify_uuid_reference_not_exist() {
let eb: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["group"],
"name": ["testgroup_b"],
@ -439,8 +423,6 @@ mod tests {
fn test_modify_remove_referee() {
let ea: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["group"],
"name": ["testgroup_a"],
@ -452,8 +434,6 @@ mod tests {
let eb: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["group"],
"name": ["testgroup_b"],
@ -480,8 +460,6 @@ mod tests {
fn test_modify_uuid_reference_self() {
let ea: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["group"],
"name": ["testgroup_a"],
@ -511,8 +489,6 @@ mod tests {
fn test_modify_reference_deleted() {
let ea: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["group", "recycled"],
"name": ["testgroup_a"],
@ -524,8 +500,6 @@ mod tests {
let eb: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["group"],
"name": ["testgroup_b"],
@ -558,8 +532,6 @@ mod tests {
fn test_delete_remove_referent_valid() {
let ea: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["group"],
"name": ["testgroup_a"],
@ -571,8 +543,6 @@ mod tests {
let eb: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["group"],
"name": ["testgroup_b"],
@ -604,8 +574,6 @@ mod tests {
fn test_delete_remove_referee() {
let ea: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["group"],
"name": ["testgroup_a"],
@ -617,8 +585,6 @@ mod tests {
let eb: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["group"],
"name": ["testgroup_b"],
@ -644,8 +610,6 @@ mod tests {
fn test_delete_remove_reference_self() {
let eb: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["group"],
"name": ["testgroup_b"],

View file

@ -69,7 +69,8 @@ impl Plugin for Spn {
// needed to validate is the same as generation, so we may as well
// just generate and set blindly when required.
// TODO: Should we work out what classes dynamically from schema into a filter?
// Should we work out what classes dynamically from schema into a filter?
// No - types that are trust replicated are fixed.
let mut domain_name: Option<String> = None;
for e in cand.iter_mut() {
@ -154,7 +155,7 @@ impl Plugin for Spn {
) -> Result<(), OperationError> {
// On modify, if changing domain_name on UUID_DOMAIN_INFO
// trigger the spn regen ... which is expensive. Future
// todo will be improvements to modify on large txns.
// TODO #157: will be improvements to modify on large txns.
let domain_name_changed =
cand.iter()
@ -278,8 +279,6 @@ mod tests {
// on create don't provide
let e: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["account"],
"name": ["testperson"],
@ -307,8 +306,6 @@ mod tests {
// on a purge of the spen, generate it.
let e: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["account"],
"name": ["testperson"],
@ -335,8 +332,6 @@ mod tests {
// on create providing invalid spn, we over-write it.
let e: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["account"],
"spn": ["testperson@invalid_domain.com"],
@ -364,8 +359,6 @@ mod tests {
// On modify (removed/present) of the spn, just regenerate it.
let e: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["account"],
"name": ["testperson"],

View file

@ -117,52 +117,46 @@ impl SchemaAttribute {
let uuid = *value.get_uuid();
// name
let name = try_audit!(
audit,
value.get_ava_single_string("attributename").ok_or_else(|| {
let name = value
.get_ava_single_string("attributename")
.ok_or_else(|| {
ladmin_error!(audit, "missing attributename");
OperationError::InvalidSchemaState("missing attributename".to_string())
})
);
})?;
// description
let description = try_audit!(
audit,
value.get_ava_single_string("description").ok_or_else(|| {
OperationError::InvalidSchemaState("missing description".to_string())
})
);
let description = value.get_ava_single_string("description").ok_or_else(|| {
ladmin_error!(audit, "missing description");
OperationError::InvalidSchemaState("missing description".to_string())
})?;
// multivalue
let multivalue = try_audit!(
audit,
value.get_ava_single_bool("multivalue").ok_or_else(|| {
OperationError::InvalidSchemaState("missing multivalue".to_string())
})
);
let unique = try_audit!(
audit,
value
.get_ava_single_bool("unique")
.ok_or_else(|| OperationError::InvalidSchemaState("missing unique".to_string()))
);
let multivalue = value.get_ava_single_bool("multivalue").ok_or_else(|| {
ladmin_error!(audit, "missing multivalue");
OperationError::InvalidSchemaState("missing multivalue".to_string())
})?;
let unique = value.get_ava_single_bool("unique").ok_or_else(|| {
ladmin_error!(audit, "missing unique");
OperationError::InvalidSchemaState("missing unique".to_string())
})?;
let phantom = value.get_ava_single_bool("phantom").unwrap_or(false);
// index vec
// even if empty, it SHOULD be present ... (is that value to put an empty set?)
// The get_ava_opt_index handles the optional case for us :)
let index = try_audit!(
audit,
value
.get_ava_opt_index("index")
.and_then(|vv: Vec<&IndexType>| Ok(vv.into_iter().cloned().collect()))
.map_err(|_| OperationError::InvalidSchemaState("Invalid index".to_string()))
);
let index = value
.get_ava_opt_index("index")
.and_then(|vv: Vec<&IndexType>| Ok(vv.into_iter().cloned().collect()))
.map_err(|_| {
ladmin_error!(audit, "invalid index");
OperationError::InvalidSchemaState("Invalid index".to_string())
})?;
// syntax type
let syntax = try_audit!(
audit,
value
.get_ava_single_syntax("syntax")
.cloned()
.ok_or_else(|| OperationError::InvalidSchemaState("missing syntax".to_string()))
);
let syntax = value
.get_ava_single_syntax("syntax")
.cloned()
.ok_or_else(|| {
ladmin_error!(audit, "missing syntax");
OperationError::InvalidSchemaState("missing syntax".to_string())
})?;
Ok(SchemaAttribute {
name,
@ -176,7 +170,7 @@ impl SchemaAttribute {
})
}
// TODO: There may be a difference between a value and a filter value on complex
// There may be a difference between a value and a filter value on complex
// types - IE a complex type may have multiple parts that are secret, but a filter
// on that may only use a single tagged attribute for example.
pub fn validate_partialvalue(&self, a: &str, v: &PartialValue) -> Result<(), SchemaError> {
@ -425,31 +419,31 @@ impl SchemaClass {
let uuid = *value.get_uuid();
// name
let name = try_audit!(
audit,
value
.get_ava_single_string("classname")
.ok_or_else(|| OperationError::InvalidSchemaState("missing classname".to_string()))
);
let name = value.get_ava_single_string("classname").ok_or_else(|| {
ladmin_error!(audit, "missing classname");
OperationError::InvalidSchemaState("missing classname".to_string())
})?;
// description
let description = try_audit!(
audit,
value.get_ava_single_string("description").ok_or_else(|| {
OperationError::InvalidSchemaState("missing description".to_string())
})
);
let description = value.get_ava_single_string("description").ok_or_else(|| {
ladmin_error!(audit, "missing description");
OperationError::InvalidSchemaState("missing description".to_string())
})?;
// These are all "optional" lists of strings.
let systemmay = value.get_ava_opt_string("systemmay").ok_or_else(|| {
ladmin_error!(audit, "missing or invalid systemmay");
OperationError::InvalidSchemaState("Missing or invalid systemmay".to_string())
})?;
let systemmust = value.get_ava_opt_string("systemmust").ok_or_else(|| {
ladmin_error!(audit, "missing or invalid systemmust");
OperationError::InvalidSchemaState("Missing or invalid systemmust".to_string())
})?;
let may = value.get_ava_opt_string("may").ok_or_else(|| {
ladmin_error!(audit, "missing or invalid may");
OperationError::InvalidSchemaState("Missing or invalid may".to_string())
})?;
let must = value.get_ava_opt_string("must").ok_or_else(|| {
ladmin_error!(audit, "missing or invalid must");
OperationError::InvalidSchemaState("Missing or invalid must".to_string())
})?;
@ -634,7 +628,7 @@ impl<'a> SchemaWriteTransaction<'a> {
}
pub fn generate_in_memory(&mut self, audit: &mut AuditScope) -> Result<(), OperationError> {
let r = lperf_segment!(audit, "schema::generate_in_memory", || {
lperf_trace_segment!(audit, "schema::generate_in_memory", || {
//
self.classes.clear();
self.attributes.clear();
@ -1410,8 +1404,7 @@ impl<'a> SchemaWriteTransaction<'a> {
ladmin_info!(audit, "schema validate -> errors {:?}", r);
Err(OperationError::ConsistencyError(r))
}
});
r
})
}
}
@ -1532,8 +1525,6 @@ mod tests {
sch_from_entry_err!(
audit,
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["object", "attributetype"],
"attributename": ["schema_attr_test"],
@ -1547,8 +1538,6 @@ mod tests {
sch_from_entry_err!(
audit,
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["object", "attributetype"],
"attributename": ["schema_attr_test"],
@ -1565,8 +1554,6 @@ mod tests {
sch_from_entry_err!(
audit,
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["object", "attributetype"],
"attributename": ["schema_attr_test"],
@ -1584,8 +1571,6 @@ mod tests {
sch_from_entry_err!(
audit,
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["object", "attributetype"],
"attributename": ["schema_attr_test"],
@ -1603,8 +1588,6 @@ mod tests {
sch_from_entry_err!(
audit,
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["object", "attributetype"],
"attributename": ["schema_attr_test"],
@ -1623,8 +1606,6 @@ mod tests {
sch_from_entry_ok!(
audit,
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["object", "attributetype"],
"attributename": ["schema_attr_test"],
@ -1642,8 +1623,6 @@ mod tests {
sch_from_entry_ok!(
audit,
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["object", "attributetype"],
"attributename": ["schema_attr_test"],
@ -1666,8 +1645,6 @@ mod tests {
sch_from_entry_err!(
audit,
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["object", "classtype"],
"classname": ["schema_class_test"],
@ -1680,8 +1657,6 @@ mod tests {
sch_from_entry_err!(
audit,
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["object"],
"classname": ["schema_class_test"],
@ -1696,8 +1671,6 @@ mod tests {
sch_from_entry_ok!(
audit,
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["object", "classtype"],
"classname": ["schema_class_test"],
@ -1712,8 +1685,6 @@ mod tests {
sch_from_entry_ok!(
audit,
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["object", "classtype"],
"classname": ["schema_class_test"],
@ -1728,8 +1699,6 @@ mod tests {
sch_from_entry_ok!(
audit,
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["object", "classtype"],
"classname": ["schema_class_test"],
@ -1744,8 +1713,6 @@ mod tests {
sch_from_entry_ok!(
audit,
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["object", "classtype"],
"classname": ["schema_class_test"],
@ -1761,8 +1728,6 @@ mod tests {
sch_from_entry_ok!(
audit,
r#"{
"valid": null,
"state": null,
"attrs": {
"class": ["object", "classtype"],
"classname": ["schema_class_test"],
@ -1916,7 +1881,7 @@ mod tests {
#[test]
fn test_schema_simple() {
let mut audit = AuditScope::new("test_schema_simple", uuid::Uuid::new_v4());
let mut audit = AuditScope::new("test_schema_simple", uuid::Uuid::new_v4(), None);
let schema = Schema::new(&mut audit).expect("failed to create schema");
let schema_ro = schema.read();
validate_schema!(schema_ro, &mut audit);
@ -1927,7 +1892,7 @@ mod tests {
fn test_schema_entries() {
// Given an entry, assert it's schema is valid
// We do
let mut audit = AuditScope::new("test_schema_entries", uuid::Uuid::new_v4());
let mut audit = AuditScope::new("test_schema_entries", uuid::Uuid::new_v4(), None);
let schema_outer = Schema::new(&mut audit).expect("failed to create schema");
let schema = schema_outer.read();
let e_no_uuid: Entry<EntryInvalid, EntryNew> = unsafe {
@ -2081,7 +2046,7 @@ mod tests {
#[test]
fn test_schema_entry_validate() {
// Check that entries can be normalised and validated sanely
let mut audit = AuditScope::new("test_schema_entry_validate", uuid::Uuid::new_v4());
let mut audit = AuditScope::new("test_schema_entry_validate", uuid::Uuid::new_v4(), None);
let schema_outer = Schema::new(&mut audit).expect("failed to create schema");
let schema = schema_outer.write();
@ -2127,7 +2092,7 @@ mod tests {
#[test]
fn test_schema_extensible() {
let mut audit = AuditScope::new("test_schema_extensible", uuid::Uuid::new_v4());
let mut audit = AuditScope::new("test_schema_extensible", uuid::Uuid::new_v4(), None);
let schema_outer = Schema::new(&mut audit).expect("failed to create schema");
let schema = schema_outer.read();
// Just because you are extensible, doesn't mean you can be lazy
@ -2191,7 +2156,8 @@ mod tests {
#[test]
fn test_schema_filter_validation() {
let mut audit = AuditScope::new("test_schema_filter_validation", uuid::Uuid::new_v4());
let mut audit =
AuditScope::new("test_schema_filter_validation", uuid::Uuid::new_v4(), None);
let schema_outer = Schema::new(&mut audit).expect("failed to create schema");
let schema = schema_outer.read();
// Test non existant attr name
@ -2260,7 +2226,11 @@ mod tests {
#[test]
fn test_schema_class_phantom_reject() {
// Check that entries can be normalised and validated sanely
let mut audit = AuditScope::new("test_schema_class_phantom_reject", uuid::Uuid::new_v4());
let mut audit = AuditScope::new(
"test_schema_class_phantom_reject",
uuid::Uuid::new_v4(),
None,
);
let schema_outer = Schema::new(&mut audit).expect("failed to create schema");
let mut schema = schema_outer.write();

File diff suppressed because it is too large Load diff

View file

@ -5,12 +5,17 @@ use uuid::Uuid;
pub struct StatusActor {
log_tx: Sender<Option<AuditScope>>,
log_level: Option<u32>,
}
impl StatusActor {
pub fn start(log_tx: Sender<Option<AuditScope>>) -> actix::Addr<StatusActor> {
pub fn start(
log_tx: Sender<Option<AuditScope>>,
log_level: Option<u32>,
) -> actix::Addr<StatusActor> {
SyncArbiter::start(1, move || StatusActor {
log_tx: log_tx.clone(),
log_level,
})
}
}
@ -31,7 +36,7 @@ impl Handler<StatusRequestEvent> for StatusActor {
type Result = bool;
fn handle(&mut self, event: StatusRequestEvent, _ctx: &mut SyncContext<Self>) -> Self::Result {
let mut audit = AuditScope::new("status_handler", event.eventid.clone());
let mut audit = AuditScope::new("status_handler", event.eventid, self.log_level);
ladmin_info!(&mut audit, "status handler");
self.log_tx.send(Some(audit)).unwrap_or_else(|_| {
error!("CRITICAL: UNABLE TO COMMIT LOGS");

View file

@ -18,7 +18,7 @@ lazy_static! {
static ref SPN_RE: Regex =
Regex::new("(?P<name>[^@]+)@(?P<realm>[^@]+)").expect("Invalid SPN regex found");
static ref INAME_RE: Regex =
Regex::new("^(_.*|.*(\\s|@|,|=).*|\\d+)$").expect("Invalid Iname regex found");
Regex::new("^(_.*|.*(\\s|@|,|=).*|\\d+|root|nobody|nogroup|wheel|sshd|shadow|systemd.*)$").expect("Invalid Iname regex found");
// ^ ^ ^
// | | \- must not be only integers
// | \- must not contain whitespace, @, ',', =
@ -659,7 +659,6 @@ impl From<IndexType> for Value {
impl From<&str> for Value {
fn from(s: &str) -> Self {
// Fuzzy match for uuid's
// TODO: Will I regret this?
match Uuid::parse_str(s) {
Ok(u) => Value {
pv: PartialValue::Uuid(u),
@ -1065,7 +1064,6 @@ impl Value {
// Keep this updated with DbValueV1 in be::dbvalue.
pub(crate) fn from_db_valuev1(v: DbValueV1) -> Result<Self, ()> {
// TODO: Should this actually take ownership? Or do we clone?
match v {
DbValueV1::U8(s) => Ok(Value {
pv: PartialValue::Utf8(s),
@ -1149,7 +1147,7 @@ impl Value {
}
pub(crate) fn to_db_valuev1(&self) -> DbValueV1 {
// TODO: Should this actually take ownership? Or do we clone?
// This has to clone due to how the backend works.
match &self.pv {
PartialValue::Utf8(s) => DbValueV1::U8(s.clone()),
PartialValue::Iutf8(s) => DbValueV1::I8(s.clone()),

View file

@ -1,43 +1,61 @@
#![deny(warnings)]
use serde_derive::Deserialize;
use std::fs::File;
use std::io::Read;
use std::path::Path;
use std::path::PathBuf;
use std::str::FromStr;
use kanidm::audit::LogLevel;
use kanidm::config::Configuration;
use kanidm::core::{
backup_server_core, create_server_core, domain_rename_core, recover_account_core,
reindex_server_core, restore_server_core, verify_server_core,
};
use log::{error, info};
use structopt::StructOpt;
/// On-disk server configuration, deserialised from the TOML file the
/// server is started with. All optional fields fall back to builtin
/// defaults; `db_path` is the only mandatory setting.
#[derive(Debug, Deserialize)]
struct ServerConfig {
    /// Network address the main server listens on (passed to
    /// `config.update_bind`).
    pub bindaddress: Option<String>,
    /// Network address the LDAP interface listens on (passed to
    /// `config.update_ldapbind`).
    pub ldapbindaddress: Option<String>,
    // pub threads: Option<usize>,
    /// Path to the server database file. Required.
    pub db_path: String,
    /// Optional TLS CA path (forwarded to `config.update_tls`).
    pub tls_ca: Option<String>,
    /// Optional TLS certificate path.
    pub tls_cert: Option<String>,
    /// Optional TLS key path.
    pub tls_key: Option<String>,
    /// Textual log level; parsed with `LogLevel::from_str` at startup.
    pub log_level: Option<String>,
}
impl ServerConfig {
    /// Read and parse the TOML configuration at `config_path`.
    ///
    /// Every failure (open, read, or parse) is reported on stderr and
    /// surfaced to the caller as `Err(())`, so the caller can simply exit.
    pub fn new<P: AsRef<Path>>(config_path: P) -> Result<Self, ()> {
        // Open first so that a missing/unreadable file gets its own message.
        let mut file = File::open(config_path).map_err(|e| {
            eprintln!("Unable to open config file [{:?}] 🥺", e);
        })?;

        // Slurp the whole file into memory, then hand it to the TOML parser.
        let mut raw = String::new();
        file.read_to_string(&mut raw)
            .map_err(|e| eprintln!("unable to read contents {:?}", e))?;

        toml::from_str(raw.as_str()).map_err(|e| eprintln!("unable to parse config {:?}", e))
    }
}
#[derive(Debug, StructOpt)]
struct CommonOpt {
#[structopt(short = "d", long = "debug")]
debug: bool,
#[structopt(parse(from_os_str), short = "D", long = "db_path")]
db_path: PathBuf,
}
#[derive(Debug, StructOpt)]
struct ServerOpt {
#[structopt(parse(from_os_str), short = "C", long = "ca")]
ca_path: Option<PathBuf>,
#[structopt(parse(from_os_str), short = "c", long = "cert")]
cert_path: Option<PathBuf>,
#[structopt(parse(from_os_str), short = "k", long = "key")]
key_path: Option<PathBuf>,
#[structopt(short = "b", long = "bindaddr")]
bind: Option<String>,
#[structopt(short = "l", long = "ldapbindaddr")]
ldapbind: Option<String>,
#[structopt(flatten)]
commonopts: CommonOpt,
/// Logging level. quiet, default, filter, verbose, perffull
debug: Option<LogLevel>,
#[structopt(parse(from_os_str), short = "c", long = "config")]
/// Path to the server's configuration file. If it does not exist, it will be created.
config_path: PathBuf,
}
#[derive(Debug, StructOpt)]
struct BackupOpt {
#[structopt(parse(from_os_str))]
/// Output path for the backup content.
path: PathBuf,
#[structopt(flatten)]
commonopts: CommonOpt,
@ -46,6 +64,7 @@ struct BackupOpt {
#[derive(Debug, StructOpt)]
struct RestoreOpt {
#[structopt(parse(from_os_str))]
    /// Restore from this path. Should be created with "backup".
path: PathBuf,
#[structopt(flatten)]
commonopts: CommonOpt,
@ -54,6 +73,7 @@ struct RestoreOpt {
#[derive(Debug, StructOpt)]
struct RecoverAccountOpt {
#[structopt(short)]
/// The account name to recover credentials for.
name: String,
#[structopt(flatten)]
commonopts: CommonOpt,
@ -62,6 +82,7 @@ struct RecoverAccountOpt {
#[derive(Debug, StructOpt)]
struct DomainOpt {
#[structopt(short)]
/// The new domain name.
new_domain_name: String,
#[structopt(flatten)]
commonopts: CommonOpt,
@ -70,32 +91,38 @@ struct DomainOpt {
#[derive(Debug, StructOpt)]
enum Opt {
#[structopt(name = "server")]
Server(ServerOpt),
/// Start the IDM Server
Server(CommonOpt),
#[structopt(name = "backup")]
/// Backup the database content (offline)
Backup(BackupOpt),
#[structopt(name = "restore")]
/// Restore the database content (offline)
Restore(RestoreOpt),
#[structopt(name = "verify")]
/// Verify database and entity consistency.
Verify(CommonOpt),
#[structopt(name = "recover_account")]
/// Recover an account's password
RecoverAccount(RecoverAccountOpt),
// #[structopt(name = "reset_server_id")]
// ResetServerId(CommonOpt),
#[structopt(name = "reindex")]
/// Reindex the database (offline)
Reindex(CommonOpt),
#[structopt(name = "domain_name_change")]
/// Change the IDM domain name
DomainChange(DomainOpt),
}
impl Opt {
fn debug(&self) -> bool {
fn commonopt(&self) -> &CommonOpt {
match self {
Opt::Server(sopt) => sopt.commonopts.debug,
Opt::Verify(sopt) | Opt::Reindex(sopt) => sopt.debug,
Opt::Backup(bopt) => bopt.commonopts.debug,
Opt::Restore(ropt) => ropt.commonopts.debug,
Opt::RecoverAccount(ropt) => ropt.commonopts.debug,
Opt::DomainChange(dopt) => dopt.commonopts.debug,
Opt::Server(sopt) | Opt::Verify(sopt) | Opt::Reindex(sopt) => &sopt,
Opt::Backup(bopt) => &bopt.commonopts,
Opt::Restore(ropt) => &ropt.commonopts,
Opt::RecoverAccount(ropt) => &ropt.commonopts,
Opt::DomainChange(dopt) => &dopt.commonopts,
}
}
}
@ -105,17 +132,35 @@ async fn main() {
// Read cli args, determine if we should backup/restore
let opt = Opt::from_args();
// Read our config (if any)
// Read our config
let mut config = Configuration::new();
// Apply any cli overrides?
let sconfig = match ServerConfig::new(&(opt.commonopt().config_path)) {
Ok(c) => c,
Err(e) => {
eprintln!("Config Parse failure {:?}", e);
std::process::exit(1);
}
};
// Apply the file requirements
let ll = sconfig
.log_level
.map(|ll| match LogLevel::from_str(ll.as_str()) {
Ok(v) => v as u32,
Err(e) => {
eprintln!("{:?}", e);
std::process::exit(1);
}
});
// Configure the server logger. This could be adjusted based on what config
// says.
if opt.debug() {
::std::env::set_var("RUST_LOG", "actix_web=debug,kanidm=debug");
} else {
::std::env::set_var("RUST_LOG", "actix_web=info,kanidm=warn");
}
config.update_log_level(ll);
config.update_db_path(&sconfig.db_path.as_str());
config.update_tls(&sconfig.tls_ca, &sconfig.tls_cert, &sconfig.tls_key);
config.update_bind(&sconfig.bindaddress);
config.update_ldapbind(&sconfig.ldapbindaddress);
// Apply any cli overrides, normally debug level.
config.update_log_level(opt.commonopt().debug.as_ref().map(|v| v.clone() as u32));
::std::env::set_var("RUST_LOG", "actix_web=info,kanidm=info");
env_logger::builder()
.format_timestamp(None)
@ -123,13 +168,8 @@ async fn main() {
.init();
match opt {
Opt::Server(sopt) => {
info!("Running in server mode ...");
config.update_db_path(&sopt.commonopts.db_path);
config.update_tls(&sopt.ca_path, &sopt.cert_path, &sopt.key_path);
config.update_bind(&sopt.bind);
config.update_ldapbind(&sopt.ldapbind);
Opt::Server(_sopt) => {
eprintln!("Running in server mode ...");
let sctx = create_server_core(config).await;
match sctx {
@ -139,71 +179,71 @@ async fn main() {
sctx.stop()
}
Err(_) => {
error!("Failed to start server core!");
eprintln!("Failed to start server core!");
return;
}
}
}
Opt::Backup(bopt) => {
info!("Running in backup mode ...");
eprintln!("Running in backup mode ...");
config.update_db_path(&bopt.commonopts.db_path);
// config.update_db_path(&bopt.commonopts.db_path);
let p = match bopt.path.to_str() {
Some(p) => p,
None => {
error!("Invalid backup path");
eprintln!("Invalid backup path");
std::process::exit(1);
}
};
backup_server_core(config, p);
}
Opt::Restore(ropt) => {
info!("Running in restore mode ...");
eprintln!("Running in restore mode ...");
config.update_db_path(&ropt.commonopts.db_path);
// config.update_db_path(&ropt.commonopts.db_path);
let p = match ropt.path.to_str() {
Some(p) => p,
None => {
error!("Invalid restore path");
eprintln!("Invalid restore path");
std::process::exit(1);
}
};
restore_server_core(config, p);
}
Opt::Verify(vopt) => {
info!("Running in db verification mode ...");
Opt::Verify(_vopt) => {
eprintln!("Running in db verification mode ...");
config.update_db_path(&vopt.db_path);
// config.update_db_path(&vopt.db_path);
verify_server_core(config);
}
Opt::RecoverAccount(raopt) => {
info!("Running account recovery ...");
eprintln!("Running account recovery ...");
let password = rpassword::prompt_password_stderr("new password: ").unwrap();
config.update_db_path(&raopt.commonopts.db_path);
// config.update_db_path(&raopt.commonopts.db_path);
recover_account_core(config, raopt.name, password);
}
/*
Opt::ResetServerId(vopt) => {
info!("Resetting server id. THIS WILL BREAK REPLICATION");
eprintln!("Resetting server id. THIS WILL BREAK REPLICATION");
config.update_db_path(&vopt.db_path);
reset_sid_core(config);
}
*/
Opt::Reindex(copt) => {
info!("Running in reindex mode ...");
Opt::Reindex(_copt) => {
eprintln!("Running in reindex mode ...");
config.update_db_path(&copt.db_path);
// config.update_db_path(&copt.db_path);
reindex_server_core(config);
}
Opt::DomainChange(dopt) => {
info!("Running in domain name change mode ... this may take a long time ...");
eprintln!("Running in domain name change mode ... this may take a long time ...");
config.update_db_path(&dopt.commonopts.db_path);
// config.update_db_path(&dopt.commonopts.db_path);
domain_rename_core(config, dopt.new_domain_name);
}
}

View file

@ -1,5 +1,7 @@
Implements # .
Fixes #
- [ ] cargo fmt has been run
- [ ] cargo clippy has been run
- [ ] cargo test has been run and passes
- [ ] book chapter included (if relevant)
- [ ] design document included (if relevant)