1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2018 Intel Corporation. All rights reserved. */
3 #include <linux/libnvdimm.h>
4 #include <linux/ndctl.h>
5 #include <linux/acpi.h>
/*
 * sysfs 'firmware_activate_noidle' (read side): report whether the bus is
 * configured to skip the firmware-forced device-idle step during firmware
 * activation. Emits "Y" or "N" from acpi_desc->fwa_noidle.
 *
 * NOTE(review): this excerpt is gap-sampled (in-line original numbering is
 * non-contiguous); braces and blank lines between statements were dropped.
 */
10 static ssize_t firmware_activate_noidle_show(struct device *dev,
11 struct device_attribute *attr, char *buf)
13 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
14 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
15 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
17 return sprintf(buf, "%s\n", acpi_desc->fwa_noidle ? "Y" : "N");
/*
 * sysfs 'firmware_activate_noidle' (write side): parse a boolean via
 * kstrtobool() and record it in acpi_desc->fwa_noidle.
 *
 * NOTE(review): the declarations of 'rc' and 'val' and the
 * error-return/size-return lines fall in gaps of this excerpt.
 */
20 static ssize_t firmware_activate_noidle_store(struct device *dev,
21 struct device_attribute *attr, const char *buf, size_t size)
23 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
24 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
25 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
29 rc = kstrtobool(buf, &val);
/*
 * A change of idle policy invalidates the cached capability so it is
 * re-read from platform firmware on the next query.
 */
32 if (val != acpi_desc->fwa_noidle)
33 acpi_desc->fwa_cap = NVDIMM_FWA_CAP_INVALID;
34 acpi_desc->fwa_noidle = val;
/* Read/write device attribute backed by the _show/_store pair above */
37 DEVICE_ATTR_RW(firmware_activate_noidle);
/*
 * intel_fwa_supported - does this bus support Intel firmware activation?
 *
 * True only when the bus advertises the Intel bus DSM family and the
 * discovered DSM mask covers the complete firmware-activate command set.
 * NOTE(review): the 'mask' declaration and the early 'return false' for
 * the family check are in gaps of this excerpt.
 */
39 bool intel_fwa_supported(struct nvdimm_bus *nvdimm_bus)
41 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
42 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
45 if (!test_bit(NVDIMM_BUS_FAMILY_INTEL, &nd_desc->bus_family_mask))
48 mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL];
/* All firmware-activate commands must be present, not just some */
49 return *mask == NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK;
/*
 * intel_security_flags - translate the Intel GET_SECURITY_STATE DSM result
 * into the generic NVDIMM_SECURITY_* flag bits for @ptype (user or master
 * passphrase domain). Returns 0 when the DSM is unsupported or fails.
 *
 * The command payload is an anonymous struct-on-stack ('nd_cmd') holding
 * the nd_cmd_pkg header plus the Intel-specific result; parts of its
 * initializer fall in gaps of this excerpt.
 */
52 static unsigned long intel_security_flags(struct nvdimm *nvdimm,
53 enum nvdimm_passphrase_type ptype)
55 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
56 unsigned long security_flags = 0;
58 struct nd_cmd_pkg pkg;
59 struct nd_intel_get_security_state cmd;
62 .nd_command = NVDIMM_INTEL_GET_SECURITY_STATE,
63 .nd_family = NVDIMM_FAMILY_INTEL,
65 sizeof(struct nd_intel_get_security_state),
67 sizeof(struct nd_intel_get_security_state),
/* DIMM does not implement the DSM: report no security support */
72 if (!test_bit(NVDIMM_INTEL_GET_SECURITY_STATE, &nfit_mem->dsm_mask))
76 * Short circuit the state retrieval while we are doing overwrite.
77 * The DSM spec states that the security state is indeterminate
78 * until the overwrite DSM completes.
80 if (nvdimm_in_overwrite(nvdimm) && ptype == NVDIMM_USER)
81 return BIT(NVDIMM_SECURITY_OVERWRITE);
83 rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
84 if (rc < 0 || nd_cmd.cmd.status) {
85 pr_err("%s: security state retrieval failed (%d:%#x)\n",
86 nvdimm_name(nvdimm), rc, nd_cmd.cmd.status);
90 /* check and see if security is enabled and locked */
91 if (ptype == NVDIMM_MASTER) {
/*
 * Master domain uses 'extended_state'. ESTATE_ENABLED maps to
 * "unlocked" (master passphrase set, usable), otherwise "disabled".
 */
92 if (nd_cmd.cmd.extended_state & ND_INTEL_SEC_ESTATE_ENABLED)
93 set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
95 set_bit(NVDIMM_SECURITY_DISABLED, &security_flags);
96 if (nd_cmd.cmd.extended_state & ND_INTEL_SEC_ESTATE_PLIMIT)
97 set_bit(NVDIMM_SECURITY_FROZEN, &security_flags);
98 return security_flags;
/* User domain uses 'state'; unsupported means report nothing */
101 if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_UNSUPPORTED)
104 if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_ENABLED) {
/* Frozen or passphrase-limit-reached both surface as "frozen" */
105 if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_FROZEN ||
106 nd_cmd.cmd.state & ND_INTEL_SEC_STATE_PLIMIT)
107 set_bit(NVDIMM_SECURITY_FROZEN, &security_flags);
109 if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_LOCKED)
110 set_bit(NVDIMM_SECURITY_LOCKED, &security_flags);
112 set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
114 set_bit(NVDIMM_SECURITY_DISABLED, &security_flags);
116 return security_flags;
/*
 * intel_security_freeze - issue the FREEZE_LOCK DSM so no further security
 * state changes are accepted until the next power cycle. Returns a
 * negative errno on transport failure; DSM status is checked separately.
 * NOTE(review): the -EOPNOTSUPP/-EIO/return-0 lines fall in gaps here.
 */
119 static int intel_security_freeze(struct nvdimm *nvdimm)
121 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
123 struct nd_cmd_pkg pkg;
124 struct nd_intel_freeze_lock cmd;
127 .nd_command = NVDIMM_INTEL_FREEZE_LOCK,
128 .nd_family = NVDIMM_FAMILY_INTEL,
129 .nd_size_out = ND_INTEL_STATUS_SIZE,
130 .nd_fw_size = ND_INTEL_STATUS_SIZE,
135 if (!test_bit(NVDIMM_INTEL_FREEZE_LOCK, &nfit_mem->dsm_mask))
138 rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
/* Non-zero firmware status means the freeze did not take effect */
141 if (nd_cmd.cmd.status)
/*
 * intel_security_change_key - set/change the user or master passphrase.
 * @ptype selects which DSM is issued (SET_PASSPHRASE vs
 * SET_MASTER_PASSPHRASE); the payload carries old + new passphrase
 * (hence nd_size_in = 2 * ND_INTEL_PASSPHRASE_SIZE).
 *
 * Note the local 'cmd' (the selected DSM number) and the struct member
 * 'cmd' (the Intel payload inside nd_cmd) are distinct scopes -- no
 * conflict, but easy to misread.
 */
146 static int intel_security_change_key(struct nvdimm *nvdimm,
147 const struct nvdimm_key_data *old_data,
148 const struct nvdimm_key_data *new_data,
149 enum nvdimm_passphrase_type ptype)
151 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
152 unsigned int cmd = ptype == NVDIMM_MASTER ?
153 NVDIMM_INTEL_SET_MASTER_PASSPHRASE :
154 NVDIMM_INTEL_SET_PASSPHRASE;
156 struct nd_cmd_pkg pkg;
157 struct nd_intel_set_passphrase cmd;
160 .nd_family = NVDIMM_FAMILY_INTEL,
161 .nd_size_in = ND_INTEL_PASSPHRASE_SIZE * 2,
162 .nd_size_out = ND_INTEL_STATUS_SIZE,
163 .nd_fw_size = ND_INTEL_STATUS_SIZE,
169 if (!test_bit(cmd, &nfit_mem->dsm_mask))
172 memcpy(nd_cmd.cmd.old_pass, old_data->data,
173 sizeof(nd_cmd.cmd.old_pass));
174 memcpy(nd_cmd.cmd.new_pass, new_data->data,
175 sizeof(nd_cmd.cmd.new_pass));
176 rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
/* Map firmware status codes to errnos (return lines are in gaps) */
180 switch (nd_cmd.cmd.status) {
183 case ND_INTEL_STATUS_INVALID_PASS:
185 case ND_INTEL_STATUS_NOT_SUPPORTED:
187 case ND_INTEL_STATUS_INVALID_STATE:
/* Forward declaration; defined near the end of the file (arch-dependent) */
193 static void nvdimm_invalidate_cache(void);
/*
 * intel_security_unlock - submit the passphrase via the UNLOCK_UNIT DSM.
 * On success the CPU caches are invalidated before the DIMM contents are
 * read, since cache lines may predate the unlock.
 */
195 static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
196 const struct nvdimm_key_data *key_data)
198 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
200 struct nd_cmd_pkg pkg;
201 struct nd_intel_unlock_unit cmd;
204 .nd_command = NVDIMM_INTEL_UNLOCK_UNIT,
205 .nd_family = NVDIMM_FAMILY_INTEL,
206 .nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
207 .nd_size_out = ND_INTEL_STATUS_SIZE,
208 .nd_fw_size = ND_INTEL_STATUS_SIZE,
213 if (!test_bit(NVDIMM_INTEL_UNLOCK_UNIT, &nfit_mem->dsm_mask))
216 memcpy(nd_cmd.cmd.passphrase, key_data->data,
217 sizeof(nd_cmd.cmd.passphrase));
218 rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
/* Map firmware status to errno (return lines are in excerpt gaps) */
221 switch (nd_cmd.cmd.status) {
224 case ND_INTEL_STATUS_INVALID_PASS:
230 /* DIMM unlocked, invalidate all CPU caches before we read it */
231 nvdimm_invalidate_cache();
/*
 * intel_security_disable - clear the user passphrase via the
 * DISABLE_PASSPHRASE DSM; the current passphrase must be supplied to
 * authorize the change.
 */
236 static int intel_security_disable(struct nvdimm *nvdimm,
237 const struct nvdimm_key_data *key_data)
240 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
242 struct nd_cmd_pkg pkg;
243 struct nd_intel_disable_passphrase cmd;
246 .nd_command = NVDIMM_INTEL_DISABLE_PASSPHRASE,
247 .nd_family = NVDIMM_FAMILY_INTEL,
248 .nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
249 .nd_size_out = ND_INTEL_STATUS_SIZE,
250 .nd_fw_size = ND_INTEL_STATUS_SIZE,
254 if (!test_bit(NVDIMM_INTEL_DISABLE_PASSPHRASE, &nfit_mem->dsm_mask))
257 memcpy(nd_cmd.cmd.passphrase, key_data->data,
258 sizeof(nd_cmd.cmd.passphrase));
259 rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
/* Map firmware status to errno (return lines are in excerpt gaps) */
263 switch (nd_cmd.cmd.status) {
266 case ND_INTEL_STATUS_INVALID_PASS:
268 case ND_INTEL_STATUS_INVALID_STATE:
/*
 * intel_security_erase - crypto-erase the DIMM via SECURE_ERASE or (for
 * @ptype == NVDIMM_MASTER) MASTER_SECURE_ERASE. Caches are flushed before
 * the erase (so no dirty lines land afterward) and invalidated after it
 * (so no stale pre-erase data is read back).
 */
276 static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
277 const struct nvdimm_key_data *key,
278 enum nvdimm_passphrase_type ptype)
281 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
282 unsigned int cmd = ptype == NVDIMM_MASTER ?
283 NVDIMM_INTEL_MASTER_SECURE_ERASE : NVDIMM_INTEL_SECURE_ERASE;
285 struct nd_cmd_pkg pkg;
286 struct nd_intel_secure_erase cmd;
289 .nd_family = NVDIMM_FAMILY_INTEL,
290 .nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
291 .nd_size_out = ND_INTEL_STATUS_SIZE,
292 .nd_fw_size = ND_INTEL_STATUS_SIZE,
297 if (!test_bit(cmd, &nfit_mem->dsm_mask))
300 /* flush all cache before we erase DIMM */
301 nvdimm_invalidate_cache();
302 memcpy(nd_cmd.cmd.passphrase, key->data,
303 sizeof(nd_cmd.cmd.passphrase));
304 rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
/* Map firmware status to errno (return lines are in excerpt gaps) */
308 switch (nd_cmd.cmd.status) {
311 case ND_INTEL_STATUS_NOT_SUPPORTED:
313 case ND_INTEL_STATUS_INVALID_PASS:
315 case ND_INTEL_STATUS_INVALID_STATE:
320 /* DIMM erased, invalidate all CPU caches before we read it */
321 nvdimm_invalidate_cache();
/*
 * intel_security_query_overwrite - poll whether a previously started
 * overwrite has completed. OQUERY_INPROGRESS maps to a "try again"
 * result; on completion caches are invalidated before the DIMM is used.
 */
325 static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
328 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
330 struct nd_cmd_pkg pkg;
331 struct nd_intel_query_overwrite cmd;
334 .nd_command = NVDIMM_INTEL_QUERY_OVERWRITE,
335 .nd_family = NVDIMM_FAMILY_INTEL,
336 .nd_size_out = ND_INTEL_STATUS_SIZE,
337 .nd_fw_size = ND_INTEL_STATUS_SIZE,
341 if (!test_bit(NVDIMM_INTEL_QUERY_OVERWRITE, &nfit_mem->dsm_mask))
344 rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
/* Map firmware status to errno (return lines are in excerpt gaps) */
348 switch (nd_cmd.cmd.status) {
351 case ND_INTEL_STATUS_OQUERY_INPROGRESS:
357 /* flush all cache before we make the nvdimms available */
358 nvdimm_invalidate_cache();
/*
 * intel_security_overwrite - start a (long-running) full-media overwrite
 * via the OVERWRITE DSM, authorized by @nkey. Completion is observed
 * later through intel_security_query_overwrite(). Caches are flushed
 * up-front so dirty lines cannot be written back mid-overwrite.
 */
362 static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
363 const struct nvdimm_key_data *nkey)
366 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
368 struct nd_cmd_pkg pkg;
369 struct nd_intel_overwrite cmd;
372 .nd_command = NVDIMM_INTEL_OVERWRITE,
373 .nd_family = NVDIMM_FAMILY_INTEL,
374 .nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
375 .nd_size_out = ND_INTEL_STATUS_SIZE,
376 .nd_fw_size = ND_INTEL_STATUS_SIZE,
380 if (!test_bit(NVDIMM_INTEL_OVERWRITE, &nfit_mem->dsm_mask))
383 /* flush all cache before we erase DIMM */
384 nvdimm_invalidate_cache();
385 memcpy(nd_cmd.cmd.passphrase, nkey->data,
386 sizeof(nd_cmd.cmd.passphrase));
387 rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
/* Map firmware status to errno (return lines are in excerpt gaps) */
391 switch (nd_cmd.cmd.status) {
394 case ND_INTEL_STATUS_OVERWRITE_UNSUPPORTED:
396 case ND_INTEL_STATUS_INVALID_PASS:
398 case ND_INTEL_STATUS_INVALID_STATE:
/*
 * NOTE(review): two definitions of nvdimm_invalidate_cache() appear below;
 * they are presumably alternates selected by a preprocessor conditional
 * (e.g. CONFIG_X86 / #else) that was dropped from this excerpt -- confirm
 * against the full file. wbinvd_on_all_cpus() is the x86 path.
 */
405 * TODO: define a cross arch wbinvd equivalent when/if
406 * NVDIMM_FAMILY_INTEL command support arrives on another arch.
409 static void nvdimm_invalidate_cache(void)
411 wbinvd_on_all_cpus();
/* Non-x86 stub: no portable wbinvd equivalent exists yet */
414 static void nvdimm_invalidate_cache(void)
/*
 * NOTE(review): WARN_ON_ONCE() takes a condition; passing a string
 * literal makes it warn unconditionally with no message text --
 * WARN_ONCE(1, "...") appears to be the intent. TODO confirm upstream.
 */
416 WARN_ON_ONCE("cache invalidation required after unlock\n");
/*
 * Security-operations vtable wired into libnvdimm; exported to the rest
 * of the nfit driver via the intel_security_ops pointer below.
 * NOTE(review): the closing brace of the initializer and any #ifdef
 * guards around .unlock/.erase/.overwrite are in excerpt gaps.
 */
420 static const struct nvdimm_security_ops __intel_security_ops = {
421 .get_flags = intel_security_flags,
422 .freeze = intel_security_freeze,
423 .change_key = intel_security_change_key,
424 .disable = intel_security_disable,
426 .unlock = intel_security_unlock,
427 .erase = intel_security_erase,
428 .overwrite = intel_security_overwrite,
429 .query_overwrite = intel_security_query_overwrite,
433 const struct nvdimm_security_ops *intel_security_ops = &__intel_security_ops;
/*
 * intel_bus_fwa_businfo - fetch bus-level firmware-activate info (state,
 * timeouts, capability bits) into @info via the BUSINFO DSM, issued
 * through the bus descriptor's ->ndctl() with a NULL nvdimm.
 * NOTE(review): the copy of the DSM result into *info and the return
 * fall in excerpt gaps.
 */
435 static int intel_bus_fwa_businfo(struct nvdimm_bus_descriptor *nd_desc,
436 struct nd_intel_bus_fw_activate_businfo *info)
439 struct nd_cmd_pkg pkg;
440 struct nd_intel_bus_fw_activate_businfo cmd;
443 .nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO,
444 .nd_family = NVDIMM_BUS_FAMILY_INTEL,
446 sizeof(struct nd_intel_bus_fw_activate_businfo),
448 sizeof(struct nd_intel_bus_fw_activate_businfo),
453 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd),
459 /* The fw_ops expect to be called with the nvdimm_bus_lock() held */
/*
 * intel_bus_fwa_state - return the bus firmware-activate state, using the
 * cached value when valid and otherwise refreshing from platform firmware
 * via intel_bus_fwa_businfo(). Also latches the (static) capability data
 * from the same payload on first read.
 */
460 static enum nvdimm_fwa_state intel_bus_fwa_state(
461 struct nvdimm_bus_descriptor *nd_desc)
463 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
464 struct nd_intel_bus_fw_activate_businfo info;
465 struct device *dev = acpi_desc->dev;
466 enum nvdimm_fwa_state state;
470 * It should not be possible for platform firmware to return
471 * busy because activate is a synchronous operation. Treat it
472 * similar to invalid, i.e. always refresh / poll the status.
474 switch (acpi_desc->fwa_state) {
475 case NVDIMM_FWA_INVALID:
476 case NVDIMM_FWA_BUSY:
479 /* check if capability needs to be refreshed */
480 if (acpi_desc->fwa_cap == NVDIMM_FWA_CAP_INVALID)
/* Cached state still valid: no DSM round trip needed */
482 return acpi_desc->fwa_state;
485 /* Refresh with platform firmware */
486 rc = intel_bus_fwa_businfo(nd_desc, &info);
488 return NVDIMM_FWA_INVALID;
/* Translate the Intel DSM state into the generic enum */
490 switch (info.state) {
491 case ND_INTEL_FWA_IDLE:
492 state = NVDIMM_FWA_IDLE;
494 case ND_INTEL_FWA_BUSY:
495 state = NVDIMM_FWA_BUSY;
497 case ND_INTEL_FWA_ARMED:
/*
 * Armed, but activation would exceed the max quiesce timeout:
 * flag as overflow so the activation policy can react.
 */
498 if (info.activate_tmo > info.max_quiesce_tmo)
499 state = NVDIMM_FWA_ARM_OVERFLOW;
501 state = NVDIMM_FWA_ARMED;
504 dev_err_once(dev, "invalid firmware activate state %d\n",
506 return NVDIMM_FWA_INVALID;
510 * Capability data is available in the same payload as state. It
511 * is expected to be static.
513 if (acpi_desc->fwa_cap == NVDIMM_FWA_CAP_INVALID) {
514 if (info.capability & ND_INTEL_BUS_FWA_CAP_FWQUIESCE)
515 acpi_desc->fwa_cap = NVDIMM_FWA_CAP_QUIESCE;
516 else if (info.capability & ND_INTEL_BUS_FWA_CAP_OSQUIESCE) {
518 * Skip hibernate cycle by default if platform
519 * indicates that it does not need devices to be
522 acpi_desc->fwa_cap = NVDIMM_FWA_CAP_LIVE;
524 acpi_desc->fwa_cap = NVDIMM_FWA_CAP_NONE;
527 acpi_desc->fwa_state = state;
/*
 * intel_bus_fwa_capability - return the cached activation capability,
 * refreshing it as a side effect of intel_bus_fwa_state() when the cache
 * is invalid. Returns NVDIMM_FWA_CAP_INVALID if the refresh fails too.
 */
532 static enum nvdimm_fwa_capability intel_bus_fwa_capability(
533 struct nvdimm_bus_descriptor *nd_desc)
535 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
537 if (acpi_desc->fwa_cap > NVDIMM_FWA_CAP_INVALID)
538 return acpi_desc->fwa_cap;
/* State refresh repopulates fwa_cap from the businfo payload */
540 if (intel_bus_fwa_state(nd_desc) > NVDIMM_FWA_INVALID)
541 return acpi_desc->fwa_cap;
543 return NVDIMM_FWA_CAP_INVALID;
/*
 * intel_bus_fwa_activate - trigger bus-level firmware activation.
 * Requires the bus to be in ARMED (or ARM_OVERFLOW) state. The
 * iodev_state payload selects OS-idle vs firmware-forced idle per the
 * fwa_noidle sysfs policy. Afterward all cached DIMM activation state is
 * invalidated via the fwa_count generation counter.
 */
546 static int intel_bus_fwa_activate(struct nvdimm_bus_descriptor *nd_desc)
548 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
550 struct nd_cmd_pkg pkg;
551 struct nd_intel_bus_fw_activate cmd;
554 .nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE,
555 .nd_family = NVDIMM_BUS_FAMILY_INTEL,
556 .nd_size_in = sizeof(nd_cmd.cmd.iodev_state),
558 sizeof(struct nd_intel_bus_fw_activate),
560 sizeof(struct nd_intel_bus_fw_activate),
563 * Even though activate is run from a suspended context,
564 * for safety, still ask platform firmware to force
565 * quiesce devices by default. Let a module
566 * parameter override that policy.
569 .iodev_state = acpi_desc->fwa_noidle
570 ? ND_INTEL_BUS_FWA_IODEV_OS_IDLE
571 : ND_INTEL_BUS_FWA_IODEV_FORCE_IDLE,
/* Only proceed from an armed state; other states bail (lines in gaps) */
576 switch (intel_bus_fwa_state(nd_desc)) {
577 case NVDIMM_FWA_ARMED:
578 case NVDIMM_FWA_ARM_OVERFLOW:
584 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd),
588 * Whether the command succeeded, or failed, the agent checking
589 * for the result needs to query the DIMMs individually.
590 * Increment the activation count to invalidate all the DIMM
591 * states at once (it's otherwise not possible to take
592 * acpi_desc->init_mutex in this context)
594 acpi_desc->fwa_state = NVDIMM_FWA_INVALID;
595 acpi_desc->fwa_count++;
597 dev_dbg(acpi_desc->dev, "result: %d\n", rc);
/*
 * Bus-level firmware-activation vtable registered with libnvdimm;
 * exported via the intel_bus_fw_ops pointer below.
 */
602 static const struct nvdimm_bus_fw_ops __intel_bus_fw_ops = {
603 .activate_state = intel_bus_fwa_state,
604 .capability = intel_bus_fwa_capability,
605 .activate = intel_bus_fwa_activate,
608 const struct nvdimm_bus_fw_ops *intel_bus_fw_ops = &__intel_bus_fw_ops;
/*
 * intel_fwa_dimminfo - fetch per-DIMM firmware-activate info (state and
 * last activation result) into @info via the DIMMINFO DSM.
 * NOTE(review): the copy into *info and the return are in excerpt gaps.
 */
610 static int intel_fwa_dimminfo(struct nvdimm *nvdimm,
611 struct nd_intel_fw_activate_dimminfo *info)
614 struct nd_cmd_pkg pkg;
615 struct nd_intel_fw_activate_dimminfo cmd;
618 .nd_command = NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO,
619 .nd_family = NVDIMM_FAMILY_INTEL,
621 sizeof(struct nd_intel_fw_activate_dimminfo),
623 sizeof(struct nd_intel_fw_activate_dimminfo),
628 rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
/*
 * intel_fwa_state - per-DIMM firmware-activate state, cached per
 * activation generation: the cache is valid while nfit_mem->fwa_count
 * matches the bus-level acpi_desc->fwa_count. A refresh updates both
 * fwa_state and fwa_result from one DIMMINFO call.
 */
633 static enum nvdimm_fwa_state intel_fwa_state(struct nvdimm *nvdimm)
635 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
636 struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
637 struct nd_intel_fw_activate_dimminfo info;
641 * Similar to the bus state, since activate is synchronous the
642 * busy state should resolve within the context of 'activate'.
644 switch (nfit_mem->fwa_state) {
645 case NVDIMM_FWA_INVALID:
646 case NVDIMM_FWA_BUSY:
649 /* If no activations occurred the old state is still valid */
650 if (nfit_mem->fwa_count == acpi_desc->fwa_count)
651 return nfit_mem->fwa_state;
654 rc = intel_fwa_dimminfo(nvdimm, &info);
656 return NVDIMM_FWA_INVALID;
/* Translate the Intel per-DIMM state into the generic enum */
658 switch (info.state) {
659 case ND_INTEL_FWA_IDLE:
660 nfit_mem->fwa_state = NVDIMM_FWA_IDLE;
662 case ND_INTEL_FWA_BUSY:
663 nfit_mem->fwa_state = NVDIMM_FWA_BUSY;
665 case ND_INTEL_FWA_ARMED:
666 nfit_mem->fwa_state = NVDIMM_FWA_ARMED;
669 nfit_mem->fwa_state = NVDIMM_FWA_INVALID;
/* Record the result of the last activation attempt alongside state */
673 switch (info.result) {
674 case ND_INTEL_DIMM_FWA_NONE:
675 nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NONE;
677 case ND_INTEL_DIMM_FWA_SUCCESS:
678 nfit_mem->fwa_result = NVDIMM_FWA_RESULT_SUCCESS;
680 case ND_INTEL_DIMM_FWA_NOTSTAGED:
681 nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NOTSTAGED;
683 case ND_INTEL_DIMM_FWA_NEEDRESET:
684 nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NEEDRESET;
/* All distinct failure modes collapse into a single FAIL result */
686 case ND_INTEL_DIMM_FWA_MEDIAFAILED:
687 case ND_INTEL_DIMM_FWA_ABORT:
688 case ND_INTEL_DIMM_FWA_NOTSUPP:
689 case ND_INTEL_DIMM_FWA_ERROR:
691 nfit_mem->fwa_result = NVDIMM_FWA_RESULT_FAIL;
/* Mark the cache current for this activation generation */
695 nfit_mem->fwa_count = acpi_desc->fwa_count;
697 return nfit_mem->fwa_state;
/*
 * intel_fwa_result - last activation result for this DIMM; serves the
 * cached value when current, otherwise refreshes it as a side effect of
 * intel_fwa_state() (which fills fwa_result from the same DIMMINFO call).
 */
700 static enum nvdimm_fwa_result intel_fwa_result(struct nvdimm *nvdimm)
702 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
703 struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
705 if (nfit_mem->fwa_count == acpi_desc->fwa_count
706 && nfit_mem->fwa_result > NVDIMM_FWA_RESULT_INVALID)
707 return nfit_mem->fwa_result;
709 if (intel_fwa_state(nvdimm) > NVDIMM_FWA_INVALID)
710 return nfit_mem->fwa_result;
712 return NVDIMM_FWA_RESULT_INVALID;
/*
 * intel_fwa_arm - arm or disarm this DIMM's staged firmware for activation
 * via the FW_ACTIVATE_ARM DSM. No-op-style short circuits apply when the
 * DIMM is already in the requested state (those return lines are in
 * excerpt gaps). Both the DIMM and bus cached states are invalidated
 * before issuing the command so they get re-read afterward.
 */
715 static int intel_fwa_arm(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arm)
717 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
718 struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
720 struct nd_cmd_pkg pkg;
721 struct nd_intel_fw_activate_arm cmd;
724 .nd_command = NVDIMM_INTEL_FW_ACTIVATE_ARM,
725 .nd_family = NVDIMM_FAMILY_INTEL,
726 .nd_size_in = sizeof(nd_cmd.cmd.activate_arm),
728 sizeof(struct nd_intel_fw_activate_arm),
730 sizeof(struct nd_intel_fw_activate_arm),
733 .activate_arm = arm == NVDIMM_FWA_ARM
734 ? ND_INTEL_DIMM_FWA_ARM
735 : ND_INTEL_DIMM_FWA_DISARM,
/* Validate current state against the requested transition */
740 switch (intel_fwa_state(nvdimm)) {
741 case NVDIMM_FWA_INVALID:
743 case NVDIMM_FWA_BUSY:
745 case NVDIMM_FWA_IDLE:
746 if (arm == NVDIMM_FWA_DISARM)
749 case NVDIMM_FWA_ARMED:
750 if (arm == NVDIMM_FWA_ARM)
758 * Invalidate the bus-level state, now that we're committed to
759 * changing the 'arm' state.
761 acpi_desc->fwa_state = NVDIMM_FWA_INVALID;
762 nfit_mem->fwa_state = NVDIMM_FWA_INVALID;
764 rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
766 dev_dbg(acpi_desc->dev, "%s result: %d\n", arm == NVDIMM_FWA_ARM
767 ? "arm" : "disarm", rc);
/*
 * Per-DIMM firmware-activation vtable registered with libnvdimm;
 * exported via the intel_fw_ops pointer below.
 */
771 static const struct nvdimm_fw_ops __intel_fw_ops = {
772 .activate_state = intel_fwa_state,
773 .activate_result = intel_fwa_result,
774 .arm = intel_fwa_arm,
777 const struct nvdimm_fw_ops *intel_fw_ops = &__intel_fw_ops;