|
|
|
|
@ -7,6 +7,7 @@
|
|
|
|
|
* Copyright © 2021-2022 Microsoft Corporation
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
#include <kunit/test.h>
|
|
|
|
|
#include <linux/atomic.h>
|
|
|
|
|
#include <linux/bitops.h>
|
|
|
|
|
#include <linux/bits.h>
|
|
|
|
|
@ -311,6 +312,119 @@ static bool no_more_access(
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
 * Shorthands to assert the result of no_more_access() (defined above).
 * They rely on a "test" variable being in scope in the calling test case.
 */
#define NMA_TRUE(...) KUNIT_EXPECT_TRUE(test, no_more_access(__VA_ARGS__))
#define NMA_FALSE(...) KUNIT_EXPECT_FALSE(test, no_more_access(__VA_ARGS__))

#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST

static void test_no_more_access(struct kunit *const test)
{
	/*
	 * Layer masks for combinations of filesystem access rights.
	 * Naming: r = READ_FILE, m = MAKE_REG, x = EXECUTE; the digits are
	 * the layer numbers (i.e. BIT_ULL() arguments) holding the right.
	 */
	const layer_mask_t rx0[LANDLOCK_NUM_ACCESS_FS] = {
		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
		[BIT_INDEX(LANDLOCK_ACCESS_FS_READ_FILE)] = BIT_ULL(0),
	};
	const layer_mask_t mx0[LANDLOCK_NUM_ACCESS_FS] = {
		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
		[BIT_INDEX(LANDLOCK_ACCESS_FS_MAKE_REG)] = BIT_ULL(0),
	};
	const layer_mask_t x0[LANDLOCK_NUM_ACCESS_FS] = {
		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
	};
	const layer_mask_t x1[LANDLOCK_NUM_ACCESS_FS] = {
		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(1),
	};
	const layer_mask_t x01[LANDLOCK_NUM_ACCESS_FS] = {
		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0) |
							   BIT_ULL(1),
	};
	/* An all-zero mask array means no layer restricts anything. */
	const layer_mask_t allows_all[LANDLOCK_NUM_ACCESS_FS] = {};

	/* Checks without restriction. */
	NMA_TRUE(&x0, &allows_all, false, &allows_all, NULL, false);
	NMA_TRUE(&allows_all, &x0, false, &allows_all, NULL, false);
	NMA_FALSE(&x0, &x0, false, &allows_all, NULL, false);

	/*
	 * Checks that we can only refer a file if no more access could be
	 * inherited.
	 */
	NMA_TRUE(&x0, &x0, false, &rx0, NULL, false);
	NMA_TRUE(&rx0, &rx0, false, &rx0, NULL, false);
	NMA_FALSE(&rx0, &rx0, false, &x0, NULL, false);
	NMA_FALSE(&rx0, &rx0, false, &x1, NULL, false);

	/* Checks allowed referring with different nested domains. */
	NMA_TRUE(&x0, &x1, false, &x0, NULL, false);
	NMA_TRUE(&x1, &x0, false, &x0, NULL, false);
	NMA_TRUE(&x0, &x01, false, &x0, NULL, false);
	NMA_TRUE(&x0, &x01, false, &rx0, NULL, false);
	NMA_TRUE(&x01, &x0, false, &x0, NULL, false);
	NMA_TRUE(&x01, &x0, false, &rx0, NULL, false);
	NMA_FALSE(&x01, &x01, false, &x0, NULL, false);

	/* Checks that file access rights are also enforced for a directory. */
	NMA_FALSE(&rx0, &rx0, true, &x0, NULL, false);

	/* Checks that directory access rights don't impact file referring... */
	NMA_TRUE(&mx0, &mx0, false, &x0, NULL, false);
	/* ...but only directory referring. */
	NMA_FALSE(&mx0, &mx0, true, &x0, NULL, false);

	/* Checks directory exchange. */
	NMA_TRUE(&mx0, &mx0, true, &mx0, &mx0, true);
	NMA_TRUE(&mx0, &mx0, true, &mx0, &x0, true);
	NMA_FALSE(&mx0, &mx0, true, &x0, &mx0, true);
	NMA_FALSE(&mx0, &mx0, true, &x0, &x0, true);
	NMA_FALSE(&mx0, &mx0, true, &x1, &x1, true);

	/* Checks file exchange with directory access rights... */
	NMA_TRUE(&mx0, &mx0, false, &mx0, &mx0, false);
	NMA_TRUE(&mx0, &mx0, false, &mx0, &x0, false);
	NMA_TRUE(&mx0, &mx0, false, &x0, &mx0, false);
	NMA_TRUE(&mx0, &mx0, false, &x0, &x0, false);
	/* ...and with file access rights. */
	NMA_TRUE(&rx0, &rx0, false, &rx0, &rx0, false);
	NMA_TRUE(&rx0, &rx0, false, &rx0, &x0, false);
	NMA_FALSE(&rx0, &rx0, false, &x0, &rx0, false);
	NMA_FALSE(&rx0, &rx0, false, &x0, &x0, false);
	NMA_FALSE(&rx0, &rx0, false, &x1, &x1, false);

	/*
	 * Allowing the following requests should not be a security risk
	 * because domain 0 denies execute access, and domain 1 is always
	 * nested with domain 0. However, adding an exception for this case
	 * would mean to check all nested domains to make sure none can get
	 * more privileges (e.g. processes only sandboxed by domain 0).
	 * Moreover, this behavior (i.e. composition of N domains) could then
	 * be inconsistent compared to domain 1's ruleset alone (e.g. it might
	 * be denied to link/rename with domain 1's ruleset, whereas it would
	 * be allowed if nested on top of domain 0). Another drawback would be
	 * to create a covert channel that could enable sandboxed processes to
	 * infer most of the filesystem restrictions from their domain. To
	 * make it simple, efficient, safe, and more consistent, this case is
	 * always denied.
	 */
	NMA_FALSE(&x1, &x1, false, &x0, NULL, false);
	NMA_FALSE(&x1, &x1, false, &rx0, NULL, false);
	NMA_FALSE(&x1, &x1, true, &x0, NULL, false);
	NMA_FALSE(&x1, &x1, true, &rx0, NULL, false);

	/* Checks the same case of exclusive domains with a file... */
	NMA_TRUE(&x1, &x1, false, &x01, NULL, false);
	NMA_FALSE(&x1, &x1, false, &x01, &x0, false);
	NMA_FALSE(&x1, &x1, false, &x01, &x01, false);
	NMA_FALSE(&x1, &x1, false, &x0, &x0, false);
	/* ...and with a directory. */
	NMA_FALSE(&x1, &x1, false, &x0, &x0, true);
	NMA_FALSE(&x1, &x1, true, &x0, &x0, false);
	NMA_FALSE(&x1, &x1, true, &x0, &x0, true);
}

#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */

/* The NMA_* helpers are only meant for the tests above. */
#undef NMA_TRUE
#undef NMA_FALSE
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Removes @layer_masks accesses that are not requested.
|
|
|
|
|
*
|
|
|
|
|
@ -331,6 +445,57 @@ scope_to_request(const access_mask_t access_request,
|
|
|
|
|
return !memchr_inv(layer_masks, 0, sizeof(*layer_masks));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST
|
|
|
|
|
|
|
|
|
|
static void test_scope_to_request_with_exec_none(struct kunit *const test)
|
|
|
|
|
{
|
|
|
|
|
/* Allows everything. */
|
|
|
|
|
layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};
|
|
|
|
|
|
|
|
|
|
/* Checks and scopes with execute. */
|
|
|
|
|
KUNIT_EXPECT_TRUE(test, scope_to_request(LANDLOCK_ACCESS_FS_EXECUTE,
|
|
|
|
|
&layer_masks));
|
|
|
|
|
KUNIT_EXPECT_EQ(test, 0,
|
|
|
|
|
layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)]);
|
|
|
|
|
KUNIT_EXPECT_EQ(test, 0,
|
|
|
|
|
layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)]);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void test_scope_to_request_with_exec_some(struct kunit *const test)
|
|
|
|
|
{
|
|
|
|
|
/* Denies execute and write. */
|
|
|
|
|
layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {
|
|
|
|
|
[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
|
|
|
|
|
[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)] = BIT_ULL(1),
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
/* Checks and scopes with execute. */
|
|
|
|
|
KUNIT_EXPECT_FALSE(test, scope_to_request(LANDLOCK_ACCESS_FS_EXECUTE,
|
|
|
|
|
&layer_masks));
|
|
|
|
|
KUNIT_EXPECT_EQ(test, BIT_ULL(0),
|
|
|
|
|
layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)]);
|
|
|
|
|
KUNIT_EXPECT_EQ(test, 0,
|
|
|
|
|
layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)]);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void test_scope_to_request_without_access(struct kunit *const test)
|
|
|
|
|
{
|
|
|
|
|
/* Denies execute and write. */
|
|
|
|
|
layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {
|
|
|
|
|
[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
|
|
|
|
|
[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)] = BIT_ULL(1),
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
/* Checks and scopes without access request. */
|
|
|
|
|
KUNIT_EXPECT_TRUE(test, scope_to_request(0, &layer_masks));
|
|
|
|
|
KUNIT_EXPECT_EQ(test, 0,
|
|
|
|
|
layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)]);
|
|
|
|
|
KUNIT_EXPECT_EQ(test, 0,
|
|
|
|
|
layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)]);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Returns true if there is at least one access right different than
|
|
|
|
|
* LANDLOCK_ACCESS_FS_REFER.
|
|
|
|
|
@ -354,6 +519,51 @@ is_eacces(const layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS],
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
 * Shorthands to assert the result of is_eacces() (defined above).
 * They rely on a "test" variable being in scope in the calling test case.
 */
#define IE_TRUE(...) KUNIT_EXPECT_TRUE(test, is_eacces(__VA_ARGS__))
#define IE_FALSE(...) KUNIT_EXPECT_FALSE(test, is_eacces(__VA_ARGS__))
|
|
|
|
|
|
|
|
|
|
#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST
|
|
|
|
|
|
|
|
|
|
static void test_is_eacces_with_none(struct kunit *const test)
|
|
|
|
|
{
|
|
|
|
|
const layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};
|
|
|
|
|
|
|
|
|
|
IE_FALSE(&layer_masks, 0);
|
|
|
|
|
IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_REFER);
|
|
|
|
|
IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_EXECUTE);
|
|
|
|
|
IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_WRITE_FILE);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void test_is_eacces_with_refer(struct kunit *const test)
|
|
|
|
|
{
|
|
|
|
|
const layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {
|
|
|
|
|
[BIT_INDEX(LANDLOCK_ACCESS_FS_REFER)] = BIT_ULL(0),
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
IE_FALSE(&layer_masks, 0);
|
|
|
|
|
IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_REFER);
|
|
|
|
|
IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_EXECUTE);
|
|
|
|
|
IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_WRITE_FILE);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void test_is_eacces_with_write(struct kunit *const test)
|
|
|
|
|
{
|
|
|
|
|
const layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {
|
|
|
|
|
[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)] = BIT_ULL(0),
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
IE_FALSE(&layer_masks, 0);
|
|
|
|
|
IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_REFER);
|
|
|
|
|
IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_EXECUTE);
|
|
|
|
|
|
|
|
|
|
IE_TRUE(&layer_masks, LANDLOCK_ACCESS_FS_WRITE_FILE);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */
|
|
|
|
|
|
|
|
|
|
/* The IE_* helpers are only meant for the tests above. */
#undef IE_TRUE
#undef IE_FALSE
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* is_access_to_paths_allowed - Check accesses for requests with a common path
|
|
|
|
|
*
|
|
|
|
|
@ -1225,3 +1435,27 @@ __init void landlock_add_fs_hooks(void)
|
|
|
|
|
security_add_hooks(landlock_hooks, ARRAY_SIZE(landlock_hooks),
|
|
|
|
|
&landlock_lsmid);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST
|
|
|
|
|
|
|
|
|
|
/* clang-format off */
|
|
|
|
|
static struct kunit_case test_cases[] = {
|
|
|
|
|
KUNIT_CASE(test_no_more_access),
|
|
|
|
|
KUNIT_CASE(test_scope_to_request_with_exec_none),
|
|
|
|
|
KUNIT_CASE(test_scope_to_request_with_exec_some),
|
|
|
|
|
KUNIT_CASE(test_scope_to_request_without_access),
|
|
|
|
|
KUNIT_CASE(test_is_eacces_with_none),
|
|
|
|
|
KUNIT_CASE(test_is_eacces_with_refer),
|
|
|
|
|
KUNIT_CASE(test_is_eacces_with_write),
|
|
|
|
|
{}
|
|
|
|
|
};
|
|
|
|
|
/* clang-format on */
|
|
|
|
|
|
|
|
|
|
static struct kunit_suite test_suite = {
|
|
|
|
|
.name = "landlock_fs",
|
|
|
|
|
.test_cases = test_cases,
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
kunit_test_suite(test_suite);
|
|
|
|
|
|
|
|
|
|
#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */
|
|
|
|
|
|