/*
 * The NFC Controller Interface is the communication protocol between an
 * NFC Controller (NFCC) and a Device Host (DH).
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * Written by Ilan Elias <ilane@ti.com>
 *
 * This file is based on hci_core.c, which was written
 * by Maxim Krasnyansky.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
27 #define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
29 #include <linux/module.h>
30 #include <linux/types.h>
31 #include <linux/workqueue.h>
32 #include <linux/completion.h>
33 #include <linux/export.h>
34 #include <linux/sched.h>
35 #include <linux/bitops.h>
36 #include <linux/skbuff.h>
39 #include <net/nfc/nci.h>
40 #include <net/nfc/nci_core.h>
41 #include <linux/nfc.h>
43 static void nci_cmd_work(struct work_struct *work);
44 static void nci_rx_work(struct work_struct *work);
45 static void nci_tx_work(struct work_struct *work);
47 /* ---- NCI requests ---- */
49 void nci_req_complete(struct nci_dev *ndev, int result)
51 if (ndev->req_status == NCI_REQ_PEND) {
52 ndev->req_result = result;
53 ndev->req_status = NCI_REQ_DONE;
54 complete(&ndev->req_completion);
58 static void nci_req_cancel(struct nci_dev *ndev, int err)
60 if (ndev->req_status == NCI_REQ_PEND) {
61 ndev->req_result = err;
62 ndev->req_status = NCI_REQ_CANCELED;
63 complete(&ndev->req_completion);
67 /* Execute request and wait for completion. */
68 static int __nci_request(struct nci_dev *ndev,
69 void (*req)(struct nci_dev *ndev, unsigned long opt),
70 unsigned long opt, __u32 timeout)
75 ndev->req_status = NCI_REQ_PEND;
77 init_completion(&ndev->req_completion);
80 wait_for_completion_interruptible_timeout(&ndev->req_completion,
83 pr_debug("wait_for_completion return %ld\n", completion_rc);
85 if (completion_rc > 0) {
86 switch (ndev->req_status) {
88 rc = nci_to_errno(ndev->req_result);
91 case NCI_REQ_CANCELED:
92 rc = -ndev->req_result;
100 pr_err("wait_for_completion_interruptible_timeout failed %ld\n",
103 rc = ((completion_rc == 0) ? (-ETIMEDOUT) : (completion_rc));
106 ndev->req_status = ndev->req_result = 0;
111 static inline int nci_request(struct nci_dev *ndev,
112 void (*req)(struct nci_dev *ndev,
114 unsigned long opt, __u32 timeout)
118 if (!test_bit(NCI_UP, &ndev->flags))
121 /* Serialize all requests */
122 mutex_lock(&ndev->req_lock);
123 rc = __nci_request(ndev, req, opt, timeout);
124 mutex_unlock(&ndev->req_lock);
129 static void nci_reset_req(struct nci_dev *ndev, unsigned long opt)
131 struct nci_core_reset_cmd cmd;
133 cmd.reset_type = NCI_RESET_TYPE_RESET_CONFIG;
134 nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 1, &cmd);
137 static void nci_init_req(struct nci_dev *ndev, unsigned long opt)
139 nci_send_cmd(ndev, NCI_OP_CORE_INIT_CMD, 0, NULL);
142 static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
144 struct nci_rf_disc_map_cmd cmd;
145 struct disc_map_config *cfg = cmd.mapping_configs;
146 __u8 *num = &cmd.num_mapping_configs;
149 /* set rf mapping configurations */
152 /* by default mapping is set to NCI_RF_INTERFACE_FRAME */
153 for (i = 0; i < ndev->num_supported_rf_interfaces; i++) {
154 if (ndev->supported_rf_interfaces[i] ==
155 NCI_RF_INTERFACE_ISO_DEP) {
156 cfg[*num].rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
157 cfg[*num].mode = NCI_DISC_MAP_MODE_POLL |
158 NCI_DISC_MAP_MODE_LISTEN;
159 cfg[*num].rf_interface = NCI_RF_INTERFACE_ISO_DEP;
161 } else if (ndev->supported_rf_interfaces[i] ==
162 NCI_RF_INTERFACE_NFC_DEP) {
163 cfg[*num].rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
164 cfg[*num].mode = NCI_DISC_MAP_MODE_POLL |
165 NCI_DISC_MAP_MODE_LISTEN;
166 cfg[*num].rf_interface = NCI_RF_INTERFACE_NFC_DEP;
170 if (*num == NCI_MAX_NUM_MAPPING_CONFIGS)
174 nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_MAP_CMD,
175 (1 + ((*num) * sizeof(struct disc_map_config))), &cmd);
178 struct nci_set_config_param {
184 static void nci_set_config_req(struct nci_dev *ndev, unsigned long opt)
186 struct nci_set_config_param *param = (struct nci_set_config_param *)opt;
187 struct nci_core_set_config_cmd cmd;
189 BUG_ON(param->len > NCI_MAX_PARAM_LEN);
192 cmd.param.id = param->id;
193 cmd.param.len = param->len;
194 memcpy(cmd.param.val, param->val, param->len);
196 nci_send_cmd(ndev, NCI_OP_CORE_SET_CONFIG_CMD, (3 + param->len), &cmd);
199 static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
201 struct nci_rf_disc_cmd cmd;
202 __u32 protocols = opt;
204 cmd.num_disc_configs = 0;
206 if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
207 (protocols & NFC_PROTO_JEWEL_MASK ||
208 protocols & NFC_PROTO_MIFARE_MASK ||
209 protocols & NFC_PROTO_ISO14443_MASK ||
210 protocols & NFC_PROTO_NFC_DEP_MASK)) {
211 cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
212 NCI_NFC_A_PASSIVE_POLL_MODE;
213 cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
214 cmd.num_disc_configs++;
217 if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
218 (protocols & NFC_PROTO_ISO14443_B_MASK)) {
219 cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
220 NCI_NFC_B_PASSIVE_POLL_MODE;
221 cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
222 cmd.num_disc_configs++;
225 if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
226 (protocols & NFC_PROTO_FELICA_MASK ||
227 protocols & NFC_PROTO_NFC_DEP_MASK)) {
228 cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
229 NCI_NFC_F_PASSIVE_POLL_MODE;
230 cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
231 cmd.num_disc_configs++;
234 nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD,
235 (1 + (cmd.num_disc_configs * sizeof(struct disc_config))),
239 struct nci_rf_discover_select_param {
240 __u8 rf_discovery_id;
244 static void nci_rf_discover_select_req(struct nci_dev *ndev, unsigned long opt)
246 struct nci_rf_discover_select_param *param =
247 (struct nci_rf_discover_select_param *)opt;
248 struct nci_rf_discover_select_cmd cmd;
250 cmd.rf_discovery_id = param->rf_discovery_id;
251 cmd.rf_protocol = param->rf_protocol;
253 switch (cmd.rf_protocol) {
254 case NCI_RF_PROTOCOL_ISO_DEP:
255 cmd.rf_interface = NCI_RF_INTERFACE_ISO_DEP;
258 case NCI_RF_PROTOCOL_NFC_DEP:
259 cmd.rf_interface = NCI_RF_INTERFACE_NFC_DEP;
263 cmd.rf_interface = NCI_RF_INTERFACE_FRAME;
267 nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_SELECT_CMD,
268 sizeof(struct nci_rf_discover_select_cmd), &cmd);
271 static void nci_rf_deactivate_req(struct nci_dev *ndev, unsigned long opt)
273 struct nci_rf_deactivate_cmd cmd;
275 cmd.type = NCI_DEACTIVATE_TYPE_IDLE_MODE;
277 nci_send_cmd(ndev, NCI_OP_RF_DEACTIVATE_CMD,
278 sizeof(struct nci_rf_deactivate_cmd), &cmd);
281 static int nci_open_device(struct nci_dev *ndev)
285 mutex_lock(&ndev->req_lock);
287 if (test_bit(NCI_UP, &ndev->flags)) {
292 if (ndev->ops->open(ndev)) {
297 atomic_set(&ndev->cmd_cnt, 1);
299 set_bit(NCI_INIT, &ndev->flags);
301 rc = __nci_request(ndev, nci_reset_req, 0,
302 msecs_to_jiffies(NCI_RESET_TIMEOUT));
304 if (ndev->ops->setup(ndev))
305 ndev->ops->setup(ndev);
308 rc = __nci_request(ndev, nci_init_req, 0,
309 msecs_to_jiffies(NCI_INIT_TIMEOUT));
313 rc = __nci_request(ndev, nci_init_complete_req, 0,
314 msecs_to_jiffies(NCI_INIT_TIMEOUT));
317 clear_bit(NCI_INIT, &ndev->flags);
320 set_bit(NCI_UP, &ndev->flags);
321 nci_clear_target_list(ndev);
322 atomic_set(&ndev->state, NCI_IDLE);
324 /* Init failed, cleanup */
325 skb_queue_purge(&ndev->cmd_q);
326 skb_queue_purge(&ndev->rx_q);
327 skb_queue_purge(&ndev->tx_q);
329 ndev->ops->close(ndev);
334 mutex_unlock(&ndev->req_lock);
338 static int nci_close_device(struct nci_dev *ndev)
340 nci_req_cancel(ndev, ENODEV);
341 mutex_lock(&ndev->req_lock);
343 if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
344 del_timer_sync(&ndev->cmd_timer);
345 del_timer_sync(&ndev->data_timer);
346 mutex_unlock(&ndev->req_lock);
350 /* Drop RX and TX queues */
351 skb_queue_purge(&ndev->rx_q);
352 skb_queue_purge(&ndev->tx_q);
354 /* Flush RX and TX wq */
355 flush_workqueue(ndev->rx_wq);
356 flush_workqueue(ndev->tx_wq);
359 skb_queue_purge(&ndev->cmd_q);
360 atomic_set(&ndev->cmd_cnt, 1);
362 set_bit(NCI_INIT, &ndev->flags);
363 __nci_request(ndev, nci_reset_req, 0,
364 msecs_to_jiffies(NCI_RESET_TIMEOUT));
365 clear_bit(NCI_INIT, &ndev->flags);
367 del_timer_sync(&ndev->cmd_timer);
370 flush_workqueue(ndev->cmd_wq);
372 /* After this point our queues are empty
373 * and no works are scheduled. */
374 ndev->ops->close(ndev);
379 mutex_unlock(&ndev->req_lock);
384 /* NCI command timer function */
385 static void nci_cmd_timer(unsigned long arg)
387 struct nci_dev *ndev = (void *) arg;
389 atomic_set(&ndev->cmd_cnt, 1);
390 queue_work(ndev->cmd_wq, &ndev->cmd_work);
393 /* NCI data exchange timer function */
394 static void nci_data_timer(unsigned long arg)
396 struct nci_dev *ndev = (void *) arg;
398 set_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
399 queue_work(ndev->rx_wq, &ndev->rx_work);
/* nfc_ops callback: power up the underlying NCI device. */
static int nci_dev_up(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	return nci_open_device(ndev);
}
/* nfc_ops callback: power down the underlying NCI device. */
static int nci_dev_down(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	return nci_close_device(ndev);
}
416 int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, __u8 *val)
418 struct nci_set_config_param param;
427 return __nci_request(ndev, nci_set_config_req, (unsigned long)¶m,
428 msecs_to_jiffies(NCI_SET_CONFIG_TIMEOUT));
430 EXPORT_SYMBOL(nci_set_config);
432 static int nci_set_local_general_bytes(struct nfc_dev *nfc_dev)
434 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
435 struct nci_set_config_param param;
437 param.val = nfc_get_local_general_bytes(nfc_dev, ¶m.len);
438 if ((param.val == NULL) || (param.len == 0))
441 if (param.len > NFC_MAX_GT_LEN)
444 param.id = NCI_PN_ATR_REQ_GEN_BYTES;
446 return nci_request(ndev, nci_set_config_req, (unsigned long)¶m,
447 msecs_to_jiffies(NCI_SET_CONFIG_TIMEOUT));
450 static int nci_start_poll(struct nfc_dev *nfc_dev,
451 __u32 im_protocols, __u32 tm_protocols)
453 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
456 if ((atomic_read(&ndev->state) == NCI_DISCOVERY) ||
457 (atomic_read(&ndev->state) == NCI_W4_ALL_DISCOVERIES)) {
458 pr_err("unable to start poll, since poll is already active\n");
462 if (ndev->target_active_prot) {
463 pr_err("there is an active target\n");
467 if ((atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) ||
468 (atomic_read(&ndev->state) == NCI_POLL_ACTIVE)) {
469 pr_debug("target active or w4 select, implicitly deactivate\n");
471 rc = nci_request(ndev, nci_rf_deactivate_req, 0,
472 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
477 if (im_protocols & NFC_PROTO_NFC_DEP_MASK) {
478 rc = nci_set_local_general_bytes(nfc_dev);
480 pr_err("failed to set local general bytes\n");
485 rc = nci_request(ndev, nci_rf_discover_req, im_protocols,
486 msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));
489 ndev->poll_prots = im_protocols;
494 static void nci_stop_poll(struct nfc_dev *nfc_dev)
496 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
498 if ((atomic_read(&ndev->state) != NCI_DISCOVERY) &&
499 (atomic_read(&ndev->state) != NCI_W4_ALL_DISCOVERIES)) {
500 pr_err("unable to stop poll, since poll is not active\n");
504 nci_request(ndev, nci_rf_deactivate_req, 0,
505 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
508 static int nci_activate_target(struct nfc_dev *nfc_dev,
509 struct nfc_target *target, __u32 protocol)
511 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
512 struct nci_rf_discover_select_param param;
513 struct nfc_target *nci_target = NULL;
517 pr_debug("target_idx %d, protocol 0x%x\n", target->idx, protocol);
519 if ((atomic_read(&ndev->state) != NCI_W4_HOST_SELECT) &&
520 (atomic_read(&ndev->state) != NCI_POLL_ACTIVE)) {
521 pr_err("there is no available target to activate\n");
525 if (ndev->target_active_prot) {
526 pr_err("there is already an active target\n");
530 for (i = 0; i < ndev->n_targets; i++) {
531 if (ndev->targets[i].idx == target->idx) {
532 nci_target = &ndev->targets[i];
538 pr_err("unable to find the selected target\n");
542 if (!(nci_target->supported_protocols & (1 << protocol))) {
543 pr_err("target does not support the requested protocol 0x%x\n",
548 if (atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) {
549 param.rf_discovery_id = nci_target->logical_idx;
551 if (protocol == NFC_PROTO_JEWEL)
552 param.rf_protocol = NCI_RF_PROTOCOL_T1T;
553 else if (protocol == NFC_PROTO_MIFARE)
554 param.rf_protocol = NCI_RF_PROTOCOL_T2T;
555 else if (protocol == NFC_PROTO_FELICA)
556 param.rf_protocol = NCI_RF_PROTOCOL_T3T;
557 else if (protocol == NFC_PROTO_ISO14443 ||
558 protocol == NFC_PROTO_ISO14443_B)
559 param.rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
561 param.rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
563 rc = nci_request(ndev, nci_rf_discover_select_req,
564 (unsigned long)¶m,
565 msecs_to_jiffies(NCI_RF_DISC_SELECT_TIMEOUT));
569 ndev->target_active_prot = protocol;
574 static void nci_deactivate_target(struct nfc_dev *nfc_dev,
575 struct nfc_target *target)
577 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
581 if (!ndev->target_active_prot) {
582 pr_err("unable to deactivate target, no active target\n");
586 ndev->target_active_prot = 0;
588 if (atomic_read(&ndev->state) == NCI_POLL_ACTIVE) {
589 nci_request(ndev, nci_rf_deactivate_req, 0,
590 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
594 static int nci_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
595 __u8 comm_mode, __u8 *gb, size_t gb_len)
597 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
600 pr_debug("target_idx %d, comm_mode %d\n", target->idx, comm_mode);
602 rc = nci_activate_target(nfc_dev, target, NFC_PROTO_NFC_DEP);
606 rc = nfc_set_remote_general_bytes(nfc_dev, ndev->remote_gb,
607 ndev->remote_gb_len);
609 rc = nfc_dep_link_is_up(nfc_dev, target->idx, NFC_COMM_PASSIVE,
615 static int nci_dep_link_down(struct nfc_dev *nfc_dev)
619 nci_deactivate_target(nfc_dev, NULL);
625 static int nci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
627 data_exchange_cb_t cb, void *cb_context)
629 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
632 pr_debug("target_idx %d, len %d\n", target->idx, skb->len);
634 if (!ndev->target_active_prot) {
635 pr_err("unable to exchange data, no active target\n");
639 if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags))
642 /* store cb and context to be used on receiving data */
643 ndev->data_exchange_cb = cb;
644 ndev->data_exchange_cb_context = cb_context;
646 rc = nci_send_data(ndev, NCI_STATIC_RF_CONN_ID, skb);
648 clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
653 static int nci_enable_se(struct nfc_dev *nfc_dev, u32 se_idx)
658 static int nci_disable_se(struct nfc_dev *nfc_dev, u32 se_idx)
/* nfc_ops callback: secure element discovery is a no-op for NCI core. */
static int nci_discover_se(struct nfc_dev *nfc_dev)
{
	return 0;
}
668 static struct nfc_ops nci_nfc_ops = {
669 .dev_up = nci_dev_up,
670 .dev_down = nci_dev_down,
671 .start_poll = nci_start_poll,
672 .stop_poll = nci_stop_poll,
673 .dep_link_up = nci_dep_link_up,
674 .dep_link_down = nci_dep_link_down,
675 .activate_target = nci_activate_target,
676 .deactivate_target = nci_deactivate_target,
677 .im_transceive = nci_transceive,
678 .enable_se = nci_enable_se,
679 .disable_se = nci_disable_se,
680 .discover_se = nci_discover_se,
683 /* ---- Interface to NCI drivers ---- */
686 * nci_allocate_device - allocate a new nci device
688 * @ops: device operations
689 * @supported_protocols: NFC protocols supported by the device
691 struct nci_dev *nci_allocate_device(struct nci_ops *ops,
692 __u32 supported_protocols,
693 int tx_headroom, int tx_tailroom)
695 struct nci_dev *ndev;
697 pr_debug("supported_protocols 0x%x\n", supported_protocols);
699 if (!ops->open || !ops->close || !ops->send)
702 if (!supported_protocols)
705 ndev = kzalloc(sizeof(struct nci_dev), GFP_KERNEL);
710 ndev->tx_headroom = tx_headroom;
711 ndev->tx_tailroom = tx_tailroom;
713 ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops,
715 tx_headroom + NCI_DATA_HDR_SIZE,
720 nfc_set_drvdata(ndev->nfc_dev, ndev);
728 EXPORT_SYMBOL(nci_allocate_device);
731 * nci_free_device - deallocate nci device
733 * @ndev: The nci device to deallocate
735 void nci_free_device(struct nci_dev *ndev)
737 nfc_free_device(ndev->nfc_dev);
740 EXPORT_SYMBOL(nci_free_device);
743 * nci_register_device - register a nci device in the nfc subsystem
745 * @dev: The nci device to register
747 int nci_register_device(struct nci_dev *ndev)
750 struct device *dev = &ndev->nfc_dev->dev;
753 rc = nfc_register_device(ndev->nfc_dev);
759 INIT_WORK(&ndev->cmd_work, nci_cmd_work);
760 snprintf(name, sizeof(name), "%s_nci_cmd_wq", dev_name(dev));
761 ndev->cmd_wq = create_singlethread_workqueue(name);
767 INIT_WORK(&ndev->rx_work, nci_rx_work);
768 snprintf(name, sizeof(name), "%s_nci_rx_wq", dev_name(dev));
769 ndev->rx_wq = create_singlethread_workqueue(name);
772 goto destroy_cmd_wq_exit;
775 INIT_WORK(&ndev->tx_work, nci_tx_work);
776 snprintf(name, sizeof(name), "%s_nci_tx_wq", dev_name(dev));
777 ndev->tx_wq = create_singlethread_workqueue(name);
780 goto destroy_rx_wq_exit;
783 skb_queue_head_init(&ndev->cmd_q);
784 skb_queue_head_init(&ndev->rx_q);
785 skb_queue_head_init(&ndev->tx_q);
787 setup_timer(&ndev->cmd_timer, nci_cmd_timer,
788 (unsigned long) ndev);
789 setup_timer(&ndev->data_timer, nci_data_timer,
790 (unsigned long) ndev);
792 mutex_init(&ndev->req_lock);
797 destroy_workqueue(ndev->rx_wq);
800 destroy_workqueue(ndev->cmd_wq);
803 nfc_unregister_device(ndev->nfc_dev);
808 EXPORT_SYMBOL(nci_register_device);
811 * nci_unregister_device - unregister a nci device in the nfc subsystem
813 * @dev: The nci device to unregister
815 void nci_unregister_device(struct nci_dev *ndev)
817 nci_close_device(ndev);
819 destroy_workqueue(ndev->cmd_wq);
820 destroy_workqueue(ndev->rx_wq);
821 destroy_workqueue(ndev->tx_wq);
823 nfc_unregister_device(ndev->nfc_dev);
825 EXPORT_SYMBOL(nci_unregister_device);
828 * nci_recv_frame - receive frame from NCI drivers
830 * @ndev: The nci device
831 * @skb: The sk_buff to receive
833 int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb)
835 pr_debug("len %d\n", skb->len);
837 if (!ndev || (!test_bit(NCI_UP, &ndev->flags) &&
838 !test_bit(NCI_INIT, &ndev->flags))) {
843 /* Queue frame for rx worker thread */
844 skb_queue_tail(&ndev->rx_q, skb);
845 queue_work(ndev->rx_wq, &ndev->rx_work);
849 EXPORT_SYMBOL(nci_recv_frame);
851 static int nci_send_frame(struct nci_dev *ndev, struct sk_buff *skb)
853 pr_debug("len %d\n", skb->len);
860 /* Get rid of skb owner, prior to sending to the driver. */
863 return ndev->ops->send(ndev, skb);
866 /* Send NCI command */
867 int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload)
869 struct nci_ctrl_hdr *hdr;
872 pr_debug("opcode 0x%x, plen %d\n", opcode, plen);
874 skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + plen), GFP_KERNEL);
876 pr_err("no memory for command\n");
880 hdr = (struct nci_ctrl_hdr *) skb_put(skb, NCI_CTRL_HDR_SIZE);
881 hdr->gid = nci_opcode_gid(opcode);
882 hdr->oid = nci_opcode_oid(opcode);
885 nci_mt_set((__u8 *)hdr, NCI_MT_CMD_PKT);
886 nci_pbf_set((__u8 *)hdr, NCI_PBF_LAST);
889 memcpy(skb_put(skb, plen), payload, plen);
891 skb_queue_tail(&ndev->cmd_q, skb);
892 queue_work(ndev->cmd_wq, &ndev->cmd_work);
897 /* ---- NCI TX Data worker thread ---- */
899 static void nci_tx_work(struct work_struct *work)
901 struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work);
904 pr_debug("credits_cnt %d\n", atomic_read(&ndev->credits_cnt));
906 /* Send queued tx data */
907 while (atomic_read(&ndev->credits_cnt)) {
908 skb = skb_dequeue(&ndev->tx_q);
912 /* Check if data flow control is used */
913 if (atomic_read(&ndev->credits_cnt) !=
914 NCI_DATA_FLOW_CONTROL_NOT_USED)
915 atomic_dec(&ndev->credits_cnt);
917 pr_debug("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d\n",
919 nci_conn_id(skb->data),
920 nci_plen(skb->data));
922 nci_send_frame(ndev, skb);
924 mod_timer(&ndev->data_timer,
925 jiffies + msecs_to_jiffies(NCI_DATA_TIMEOUT));
929 /* ----- NCI RX worker thread (data & control) ----- */
931 static void nci_rx_work(struct work_struct *work)
933 struct nci_dev *ndev = container_of(work, struct nci_dev, rx_work);
936 while ((skb = skb_dequeue(&ndev->rx_q))) {
938 switch (nci_mt(skb->data)) {
940 nci_rsp_packet(ndev, skb);
944 nci_ntf_packet(ndev, skb);
947 case NCI_MT_DATA_PKT:
948 nci_rx_data_packet(ndev, skb);
952 pr_err("unknown MT 0x%x\n", nci_mt(skb->data));
958 /* check if a data exchange timout has occurred */
959 if (test_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags)) {
960 /* complete the data exchange transaction, if exists */
961 if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
962 nci_data_exchange_complete(ndev, NULL, -ETIMEDOUT);
964 clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
968 /* ----- NCI TX CMD worker thread ----- */
970 static void nci_cmd_work(struct work_struct *work)
972 struct nci_dev *ndev = container_of(work, struct nci_dev, cmd_work);
975 pr_debug("cmd_cnt %d\n", atomic_read(&ndev->cmd_cnt));
977 /* Send queued command */
978 if (atomic_read(&ndev->cmd_cnt)) {
979 skb = skb_dequeue(&ndev->cmd_q);
983 atomic_dec(&ndev->cmd_cnt);
985 pr_debug("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n",
987 nci_opcode_gid(nci_opcode(skb->data)),
988 nci_opcode_oid(nci_opcode(skb->data)),
989 nci_plen(skb->data));
991 nci_send_frame(ndev, skb);
993 mod_timer(&ndev->cmd_timer,
994 jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
998 MODULE_LICENSE("GPL");