+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ return datalen;
+}
+
+static const struct file_operations ima_measurements_staged_ops = {
+ .open = ima_measurements_staged_open,
+ .read = seq_read,
+ .write = ima_measurements_staged_write,
+ .llseek = seq_lseek,
+ .release = ima_measurements_release,
+};
+
void ima_print_digest(struct seq_file *m, u8 *digest, u32 size)
{
u32 i;
+ @@ -356,6 +436,28 @@ static const struct file_operations ima_ascii_measurements_ops = {
.release = ima_measurements_release,
};
+static const struct seq_operations ima_ascii_measurements_staged_seqops = {
+ .start = ima_measurements_staged_start,
+ .next = ima_measurements_staged_next,
+ .stop = ima_measurements_stop,
+ .show = ima_ascii_measurements_show,
+};
+
+static int ima_ascii_measurements_staged_open(struct inode *inode,
+ struct file *file)
+{
+ return _ima_measurements_open(inode, file,
+ &ima_ascii_measurements_staged_seqops);
+}
+
+static const struct file_operations ima_ascii_measurements_staged_ops = {
+ .open = ima_ascii_measurements_staged_open,
+ .read = seq_read,
+ .write = ima_measurements_staged_write,
+ .llseek = seq_lseek,
+ .release = ima_measurements_release,
+};
+
static ssize_t ima_read_policy(char *path)
{
void *data = NULL;
@@ -459,10 +561,21 @@ static const struct seq_operations ima_policy_seqops = {
};
#endif
-static int __init create_securityfs_measurement_lists(void)
+static int __init create_securityfs_measurement_lists(bool staging)
{
+ const struct file_operations *ascii_ops = &ima_ascii_measurements_ops;
+ const struct file_operations *binary_ops = &ima_measurements_ops;
+ umode_t permissions = S_IRUSR | S_IRGRP;
+ const char *file_suffix = "";
int count = NR_BANKS(ima_tpm_chip);
+ if (staging) {
+ ascii_ops = &ima_ascii_measurements_staged_ops;
+ binary_ops = &ima_measurements_staged_ops;
+ file_suffix = "_staged";
+ permissions |= (S_IWUSR | S_IWGRP);
+ }
+
if (ima_sha1_idx >= NR_BANKS(ima_tpm_chip))
count++;
@@ -473,29 +586,32 @@ static int __init create_securityfs_measurement_lists(void)
if (algo == HASH_ALGO__LAST)
snprintf(file_name, sizeof(file_name),
- "ascii_runtime_measurements_tpm_alg_%x",
- ima_tpm_chip->allocated_banks[i].alg_id);
+ "ascii_runtime_measurements_tpm_alg_%x%s",
+ ima_tpm_chip->allocated_banks[i].alg_id,
+ file_suffix);
else
snprintf(file_name, sizeof(file_name),
- "ascii_runtime_measurements_%s",
- hash_algo_name[algo]);
- dentry = securityfs_create_file(file_name, S_IRUSR | S_IRGRP,
+ "ascii_runtime_measurements_%s%s",
+ hash_algo_name[algo], file_suffix);
+ dentry = securityfs_create_file(file_name, permissions,
ima_dir, (void *)(uintptr_t)i,
- &ima_ascii_measurements_ops);
+ ascii_ops);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
if (algo == HASH_ALGO__LAST)
snprintf(file_name, sizeof(file_name),
- "binary_runtime_measurements_tpm_alg_%x",
- ima_tpm_chip->allocated_banks[i].alg_id);
+ "binary_runtime_measurements_tpm_alg_%x%s",
+ ima_tpm_chip->allocated_banks[i].alg_id,
+ file_suffix);
else
snprintf(file_name, sizeof(file_name),
- "binary_runtime_measurements_%s",
- hash_algo_name[algo]);
- dentry = securityfs_create_file(file_name, S_IRUSR | S_IRGRP,
+ "binary_runtime_measurements_%s%s",
+ hash_algo_name[algo], file_suffix);
+
+ dentry = securityfs_create_file(file_name, permissions,
ima_dir, (void *)(uintptr_t)i,
- &ima_measurements_ops);
+ binary_ops);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
}
@@ -503,6 +619,23 @@ static int __init create_securityfs_measurement_lists(void)
return 0;
}
+static int __init create_securityfs_staging_links(void)
+{
+ struct dentry *dentry;
+
+ dentry = securityfs_create_symlink("binary_runtime_measurements_staged",
+ ima_dir, "binary_runtime_measurements_sha1_staged", NULL);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ dentry = securityfs_create_symlink("ascii_runtime_measurements_staged",
+ ima_dir, "ascii_runtime_measurements_sha1_staged", NULL);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ return 0;
+}
+
/*
* ima_open_policy: sequentialize access to the policy file
*/
@@ -595,7 +728,13 @@ int __init ima_fs_init(void)
goto out;
}
- ret = create_securityfs_measurement_lists();
+ ret = create_securityfs_measurement_lists(false);
+ if (ret == 0 && IS_ENABLED(CONFIG_IMA_STAGING)) {
+ ret = create_securityfs_measurement_lists(true);
+ if (ret == 0)
+ ret = create_securityfs_staging_links();
+ }
+
if (ret != 0)
goto out;
diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c
index d7d0fb639d99..d5503dd5cc9b 100644
--- a/security/integrity/ima/ima_kexec.c
+++ b/security/integrity/ima/ima_kexec.c
@@ -42,8 +42,8 @@ void ima_measure_kexec_event(const char *event_name)
long len;
int n;
- buf_size = ima_get_binary_runtime_size(BINARY);
- len = atomic_long_read(&ima_num_entries[BINARY]);
+ buf_size = ima_get_binary_runtime_size(BINARY_FULL);
+ len = atomic_long_read(&ima_num_entries[BINARY_FULL]);
n = scnprintf(ima_kexec_event, IMA_KEXEC_EVENT_LEN,
"kexec_segment_size=%lu;ima_binary_runtime_size=%lu;"
@@ -106,13 +106,26 @@ static int ima_dump_measurement_list(unsigned long *buffer_size, void **buffer,
memset(&khdr, 0, sizeof(khdr));
khdr.version = 1;
- /* This is an append-only list, no need to hold the RCU read lock */
- list_for_each_entry_rcu(qe, &ima_measurements, later, true) {
+ /* It can race with ima_queue_stage() and ima_queue_staged_delete_all(). */
+ mutex_lock(&ima_extend_list_mutex);
+
+ list_for_each_entry_rcu(qe, &ima_measurements_staged, later,
+ lockdep_is_held(&ima_extend_list_mutex)) {
ret = ima_dump_measurement(&khdr, qe);
if (ret < 0)
break;
}
+ list_for_each_entry_rcu(qe, &ima_measurements, later,
+ lockdep_is_held(&ima_extend_list_mutex)) {
+ if (!ret)
+ ret = ima_dump_measurement(&khdr, qe);
+ if (ret < 0)
+ break;
+ }
+
+ mutex_unlock(&ima_extend_list_mutex);
+
/*
* fill in reserved space with some buffer details
* (eg. version, buffer size, number of measurements)
@@ -167,6 +180,7 @@ void ima_add_kexec_buffer(struct kimage *image)
extra_memory = CONFIG_IMA_KEXEC_EXTRA_MEMORY_KB * 1024;
binary_runtime_size = ima_get_binary_runtime_size(BINARY) +
+ ima_get_binary_runtime_size(BINARY_STAGED) +
extra_memory;
if (binary_runtime_size >= ULONG_MAX - PAGE_SIZE)
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
index b6d10dceb669..50519ed837d4 100644
--- a/security/integrity/ima/ima_queue.c
+++ b/security/integrity/ima/ima_queue.c
@@ -26,6 +26,7 @@
static struct tpm_digest *digests;
LIST_HEAD(ima_measurements); /* list of all measurements */
+LIST_HEAD(ima_measurements_staged); /* list of staged measurements */
#ifdef CONFIG_IMA_KEXEC
static unsigned long binary_runtime_size[BINARY__LAST];
#else
@@ -45,11 +46,11 @@ atomic_long_t ima_num_violations = ATOMIC_LONG_INIT(0);
/* key: inode (before secure-hashing a file) */
struct hlist_head __rcu *ima_htable;
-/* mutex protects atomicity of extending measurement list
+/* mutex protects atomicity of extending and staging measurement list
* and extending the TPM PCR aggregate. Since tpm_extend can take
* long (and the tpm driver uses a mutex), we can't use the spinlock.
*/
-static DEFINE_MUTEX(ima_extend_list_mutex);
+DEFINE_MUTEX(ima_extend_list_mutex);
/*
* Used internally by the kernel to suspend measurements.
@@ -174,12 +175,16 @@ static int ima_add_digest_entry(struct ima_template_entry *entry,
lockdep_is_held(&ima_extend_list_mutex));
atomic_long_inc(&ima_num_entries[BINARY]);
+ atomic_long_inc(&ima_num_entries[BINARY_FULL]);
+
if (update_htable) {
key = ima_hash_key(entry->digests[ima_hash_algo_idx].digest);
hlist_add_head_rcu(&qe->hnext, &htable[key]);
}
ima_update_binary_runtime_size(entry, BINARY);
+ ima_update_binary_runtime_size(entry, BINARY_FULL);
+
return 0;
}
@@ -280,6 +285,94 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
return result;
}
+int ima_queue_stage(void)
+{
+ int ret = 0;
+
+ mutex_lock(&ima_extend_list_mutex);
+ if (!list_empty(&ima_measurements_staged)) {
+ ret = -EEXIST;
+ goto out_unlock;
+ }
+
+ if (list_empty(&ima_measurements)) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ list_replace(&ima_measurements, &ima_measurements_staged);
+ INIT_LIST_HEAD(&ima_measurements);
+
+ atomic_long_set(&ima_num_entries[BINARY_STAGED],
+ atomic_long_read(&ima_num_entries[BINARY]));
+ atomic_long_set(&ima_num_entries[BINARY], 0);
+
+ if (IS_ENABLED(CONFIG_IMA_KEXEC)) {
+ binary_runtime_size[BINARY_STAGED] =
+ binary_runtime_size[BINARY];
+ binary_runtime_size[BINARY] = 0;
+ }
+out_unlock:
+ mutex_unlock(&ima_extend_list_mutex);
+ return ret;
+}
+
+static void ima_queue_delete(struct list_head *head);
+
+int ima_queue_staged_delete_all(void)
+{
+ LIST_HEAD(ima_measurements_trim);
+
+ mutex_lock(&ima_extend_list_mutex);
+ if (list_empty(&ima_measurements_staged)) {
+ mutex_unlock(&ima_extend_list_mutex);
+ return -ENOENT;
+ }
+
+ list_replace(&ima_measurements_staged, &ima_measurements_trim);
+ INIT_LIST_HEAD(&ima_measurements_staged);
+
+ atomic_long_set(&ima_num_entries[BINARY_STAGED], 0);
+
+ if (IS_ENABLED(CONFIG_IMA_KEXEC))
+ binary_runtime_size[BINARY_STAGED] = 0;
+
+ mutex_unlock(&ima_extend_list_mutex);
+
+ ima_queue_delete(&ima_measurements_trim);
+ return 0;
+}
+
+static void ima_queue_delete(struct list_head *head)
+{
+ struct ima_queue_entry *qe, *qe_tmp;
+ unsigned int i;
+
+ list_for_each_entry_safe(qe, qe_tmp, head, later) {
+ /*
+ * Safe to free template_data here without synchronize_rcu()
+ * because the only htable reader, ima_lookup_digest_entry(),
+ * accesses only entry->digests, not template_data. If new
+ * htable readers are added that access template_data, a
+ * synchronize_rcu() is required here.
+ */
+ for (i = 0; i < qe->entry->template_desc->num_fields; i++) {
+ kfree(qe->entry->template_data[i].data);
+ qe->entry->template_data[i].data = NULL;
+ qe->entry->template_data[i].len = 0;
+ }
+
+ list_del(&qe->later);
+
+ /* Not a leak: with the htable enabled, qe stays referenced by ima_htable. */
+ if (IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE)) {
+ kfree(qe->entry->digests);
+ kfree(qe->entry);
+ kfree(qe);
+ }
+ }
+}
+
int ima_restore_measurement_entry(struct ima_template_entry *entry)
{
int result = 0;