// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. All rights reserved. */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/cred.h>
#include <linux/key.h>
#include <linux/key-type.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
#include "nd-core.h"
#include "nd.h"
18 #define NVDIMM_BASE_KEY 0
19 #define NVDIMM_NEW_KEY 1
21 static bool key_revalidate = true;
22 module_param(key_revalidate, bool, 0444);
23 MODULE_PARM_DESC(key_revalidate, "Require key validation at init.");
25 static void *key_data(struct key *key)
27 struct encrypted_key_payload *epayload = dereference_key_locked(key);
29 lockdep_assert_held_read(&key->sem);
31 return epayload->decrypted_data;
34 static void nvdimm_put_key(struct key *key)
44 * Retrieve kernel key for DIMM and request from user space if
45 * necessary. Returns a key held for read and must be put by
46 * nvdimm_put_key() before the usage goes out of scope.
48 static struct key *nvdimm_request_key(struct nvdimm *nvdimm)
50 struct key *key = NULL;
51 static const char NVDIMM_PREFIX[] = "nvdimm:";
52 char desc[NVDIMM_KEY_DESC_LEN + sizeof(NVDIMM_PREFIX)];
53 struct device *dev = &nvdimm->dev;
55 sprintf(desc, "%s%s", NVDIMM_PREFIX, nvdimm->dimm_id);
56 key = request_key(&key_type_encrypted, desc, "");
58 if (PTR_ERR(key) == -ENOKEY)
59 dev_warn(dev, "request_key() found no key\n");
61 dev_warn(dev, "request_key() upcall failed\n");
64 struct encrypted_key_payload *epayload;
67 epayload = dereference_key_locked(key);
68 if (epayload->decrypted_datalen != NVDIMM_PASSPHRASE_LEN) {
78 static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm,
79 key_serial_t id, int subclass)
83 struct encrypted_key_payload *epayload;
84 struct device *dev = &nvdimm->dev;
86 keyref = lookup_user_key(id, 0, 0);
90 key = key_ref_to_ptr(keyref);
91 if (key->type != &key_type_encrypted) {
96 dev_dbg(dev, "%s: key found: %#x\n", __func__, key_serial(key));
98 down_read_nested(&key->sem, subclass);
99 epayload = dereference_key_locked(key);
100 if (epayload->decrypted_datalen != NVDIMM_PASSPHRASE_LEN) {
108 static struct key *nvdimm_key_revalidate(struct nvdimm *nvdimm)
113 if (!nvdimm->sec.ops->change_key)
116 key = nvdimm_request_key(nvdimm);
121 * Send the same key to the hardware as new and old key to
122 * verify that the key is good.
124 rc = nvdimm->sec.ops->change_key(nvdimm, key_data(key), key_data(key));
132 static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
134 struct device *dev = &nvdimm->dev;
135 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
136 struct key *key = NULL;
139 /* The bus lock should be held at the top level of the call stack */
140 lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
142 if (!nvdimm->sec.ops || !nvdimm->sec.ops->unlock
143 || nvdimm->sec.state < 0)
146 if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
147 dev_warn(dev, "Security operation in progress.\n");
152 * If the pre-OS has unlocked the DIMM, attempt to send the key
153 * from request_key() to the hardware for verification. Failure
154 * to revalidate the key against the hardware results in a
155 * freeze of the security configuration. I.e. if the OS does not
156 * have the key, security is being managed pre-OS.
158 if (nvdimm->sec.state == NVDIMM_SECURITY_UNLOCKED) {
162 key = nvdimm_key_revalidate(nvdimm);
164 return nvdimm_security_freeze(nvdimm);
166 key = nvdimm_request_key(nvdimm);
171 rc = nvdimm->sec.ops->unlock(nvdimm, key_data(key));
172 dev_dbg(dev, "key: %d unlock: %s\n", key_serial(key),
173 rc == 0 ? "success" : "fail");
176 nvdimm->sec.state = nvdimm_security_state(nvdimm);
/*
 * Public entry point for unlock: takes the bus lock around
 * __nvdimm_security_unlock(). Returns 0 or a negative errno.
 */
int nvdimm_security_unlock(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	int rc;

	nvdimm_bus_lock(dev);
	rc = __nvdimm_security_unlock(nvdimm);
	nvdimm_bus_unlock(dev);
	return rc;
}
191 int nvdimm_security_disable(struct nvdimm *nvdimm, unsigned int keyid)
193 struct device *dev = &nvdimm->dev;
194 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
198 /* The bus lock should be held at the top level of the call stack */
199 lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
201 if (!nvdimm->sec.ops || !nvdimm->sec.ops->disable
202 || nvdimm->sec.state < 0)
205 if (nvdimm->sec.state >= NVDIMM_SECURITY_FROZEN) {
206 dev_warn(dev, "Incorrect security state: %d\n",
211 if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
212 dev_warn(dev, "Security operation in progress.\n");
216 key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY);
220 rc = nvdimm->sec.ops->disable(nvdimm, key_data(key));
221 dev_dbg(dev, "key: %d disable: %s\n", key_serial(key),
222 rc == 0 ? "success" : "fail");
225 nvdimm->sec.state = nvdimm_security_state(nvdimm);
229 int nvdimm_security_update(struct nvdimm *nvdimm, unsigned int keyid,
230 unsigned int new_keyid)
232 struct device *dev = &nvdimm->dev;
233 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
234 struct key *key, *newkey;
237 /* The bus lock should be held at the top level of the call stack */
238 lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
240 if (!nvdimm->sec.ops || !nvdimm->sec.ops->change_key
241 || nvdimm->sec.state < 0)
244 if (nvdimm->sec.state >= NVDIMM_SECURITY_FROZEN) {
245 dev_warn(dev, "Incorrect security state: %d\n",
253 key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY);
258 newkey = nvdimm_lookup_user_key(nvdimm, new_keyid, NVDIMM_NEW_KEY);
264 rc = nvdimm->sec.ops->change_key(nvdimm, key ? key_data(key) : NULL,
266 dev_dbg(dev, "key: %d %d update: %s\n",
267 key_serial(key), key_serial(newkey),
268 rc == 0 ? "success" : "fail");
270 nvdimm_put_key(newkey);
272 nvdimm->sec.state = nvdimm_security_state(nvdimm);
276 int nvdimm_security_erase(struct nvdimm *nvdimm, unsigned int keyid)
278 struct device *dev = &nvdimm->dev;
279 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
283 /* The bus lock should be held at the top level of the call stack */
284 lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
286 if (!nvdimm->sec.ops || !nvdimm->sec.ops->erase
287 || nvdimm->sec.state < 0)
290 if (atomic_read(&nvdimm->busy)) {
291 dev_warn(dev, "Unable to secure erase while DIMM active.\n");
295 if (nvdimm->sec.state >= NVDIMM_SECURITY_FROZEN) {
296 dev_warn(dev, "Incorrect security state: %d\n",
301 if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
302 dev_warn(dev, "Security operation in progress.\n");
306 key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY);
310 rc = nvdimm->sec.ops->erase(nvdimm, key_data(key));
311 dev_dbg(dev, "key: %d erase: %s\n", key_serial(key),
312 rc == 0 ? "success" : "fail");
315 nvdimm->sec.state = nvdimm_security_state(nvdimm);
319 int nvdimm_security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
321 struct device *dev = &nvdimm->dev;
322 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
326 /* The bus lock should be held at the top level of the call stack */
327 lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
329 if (!nvdimm->sec.ops || !nvdimm->sec.ops->overwrite
330 || nvdimm->sec.state < 0)
333 if (atomic_read(&nvdimm->busy)) {
334 dev_warn(dev, "Unable to overwrite while DIMM active.\n");
338 if (dev->driver == NULL) {
339 dev_warn(dev, "Unable to overwrite while DIMM active.\n");
343 if (nvdimm->sec.state >= NVDIMM_SECURITY_FROZEN) {
344 dev_warn(dev, "Incorrect security state: %d\n",
349 if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
350 dev_warn(dev, "Security operation in progress.\n");
357 key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY);
362 rc = nvdimm->sec.ops->overwrite(nvdimm, key ? key_data(key) : NULL);
363 dev_dbg(dev, "key: %d overwrite submission: %s\n", key_serial(key),
364 rc == 0 ? "success" : "fail");
368 set_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
369 set_bit(NDD_WORK_PENDING, &nvdimm->flags);
370 nvdimm->sec.state = NVDIMM_SECURITY_OVERWRITE;
372 * Make sure we don't lose device while doing overwrite
376 queue_delayed_work(system_wq, &nvdimm->dwork, 0);
381 void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm)
383 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nvdimm->dev);
387 /* The bus lock should be held at the top level of the call stack */
388 lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
391 * Abort and release device if we no longer have the overwrite
392 * flag set. It means the work has been canceled.
394 if (!test_bit(NDD_WORK_PENDING, &nvdimm->flags))
397 tmo = nvdimm->sec.overwrite_tmo;
399 if (!nvdimm->sec.ops || !nvdimm->sec.ops->query_overwrite
400 || nvdimm->sec.state < 0)
403 rc = nvdimm->sec.ops->query_overwrite(nvdimm);
406 /* setup delayed work again */
408 queue_delayed_work(system_wq, &nvdimm->dwork, tmo * HZ);
409 nvdimm->sec.overwrite_tmo = min(15U * 60U, tmo);
414 dev_warn(&nvdimm->dev, "overwrite failed\n");
416 dev_dbg(&nvdimm->dev, "overwrite completed\n");
418 if (nvdimm->sec.overwrite_state)
419 sysfs_notify_dirent(nvdimm->sec.overwrite_state);
420 nvdimm->sec.overwrite_tmo = 0;
421 clear_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
422 clear_bit(NDD_WORK_PENDING, &nvdimm->flags);
423 put_device(&nvdimm->dev);
424 nvdimm->sec.state = nvdimm_security_state(nvdimm);
427 void nvdimm_security_overwrite_query(struct work_struct *work)
429 struct nvdimm *nvdimm =
430 container_of(work, typeof(*nvdimm), dwork.work);
432 nvdimm_bus_lock(&nvdimm->dev);
433 __nvdimm_security_overwrite_query(nvdimm);
434 nvdimm_bus_unlock(&nvdimm->dev);