1 /* SPDX-License-Identifier: GPL-2.0 */
3 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
4 * Copyright (C) 2019-2020 Linaro Ltd.
 */
6 #ifndef _IPA_ENDPOINT_H_
7 #define _IPA_ENDPOINT_H_
9 #include <linux/types.h>
10 #include <linux/workqueue.h>
11 #include <linux/if_ether.h>
20 struct ipa_gsi_endpoint_data;
22 /* Non-zero granularity of counter used to implement aggregation timeout */
23 #define IPA_AGGR_GRANULARITY 500 /* microseconds */
25 #define IPA_MTU ETH_DATA_LEN
/* Symbolic names for endpoints the driver refers to directly.  Each
 * identifier combines the endpoint's side (AP or MODEM) with its
 * direction (TX or RX), per the identifier text.
 * NOTE(review): the enum's closing brace is not visible in this chunk.
 */
27 enum ipa_endpoint_name {
28 IPA_ENDPOINT_AP_COMMAND_TX,
29 IPA_ENDPOINT_AP_LAN_RX,
30 IPA_ENDPOINT_AP_MODEM_TX,
31 IPA_ENDPOINT_AP_MODEM_RX,
32 IPA_ENDPOINT_MODEM_COMMAND_TX,
33 IPA_ENDPOINT_MODEM_LAN_TX,
34 IPA_ENDPOINT_MODEM_LAN_RX,
35 IPA_ENDPOINT_MODEM_AP_TX,
36 IPA_ENDPOINT_MODEM_AP_RX,
37 IPA_ENDPOINT_MODEM_DL_NLO_TX,
38 IPA_ENDPOINT_COUNT, /* Number of names (not an index) */
41 #define IPA_ENDPOINT_MAX 32 /* Max supported by driver */
/**
44 * struct ipa_endpoint - IPA endpoint information
46 * @ee_id: Execution environment endpoint is associated with
47 * @channel_id: GSI channel used by the endpoint
48 * @endpoint_id: IPA endpoint number
49 * @toward_ipa: Endpoint direction (true = TX, false = RX)
50 * @data: Endpoint configuration data
51 * @trans_tre_max: Maximum number of TRE descriptors per transaction
52 * @evt_ring_id: GSI event ring used by the endpoint
53 * @netdev: Network device pointer, if endpoint uses one
54 * @replenish_enabled: Whether receive buffer replenishing is enabled
55 * @replenish_ready: Number of replenish transactions without doorbell
56 * @replenish_saved: Replenish requests held while disabled
57 * @replenish_backlog: Number of buffers needed to fill hardware queue
58 * @replenish_work: Work item used for repeated replenish failures
 */
66 const struct ipa_endpoint_config_data *data; /* Endpoint configuration data */
71 /* Net device this endpoint is associated with, if any */
72 struct net_device *netdev;
74 /* Receive buffer replenishing for RX endpoints */
75 bool replenish_enabled; /* Whether buffer replenishing is enabled */
77 atomic_t replenish_saved; /* Replenish requests held while disabled */
78 atomic_t replenish_backlog; /* Buffers needed to fill hardware queue */
79 struct delayed_work replenish_work; /* global wq */
/* Operations applied to all modem-related endpoints */
82 void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa);
84 void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable);
86 int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa);
/* Transmit a socket buffer on an endpoint; returns 0 or a negative
 * error code (inferred from the int return -- confirm in ipa_endpoint.c)
 */
88 int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb);
/* Operations on a single endpoint */
90 int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint);
91 void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint);
93 void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint);
94 void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint);
/* Suspend/resume applied to all endpoints */
96 void ipa_endpoint_suspend(struct ipa *ipa);
97 void ipa_endpoint_resume(struct ipa *ipa);
/* Paired lifecycle operations: setup/teardown, config/deconfig */
99 void ipa_endpoint_setup(struct ipa *ipa);
100 void ipa_endpoint_teardown(struct ipa *ipa);
102 int ipa_endpoint_config(struct ipa *ipa);
103 void ipa_endpoint_deconfig(struct ipa *ipa);
/* Set or clear the default-route endpoint */
105 void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id);
106 void ipa_endpoint_default_route_clear(struct ipa *ipa);
/* Initialize endpoint state from @count entries of configuration
 * data; the u32 return is presumably a mask of defined endpoints --
 * TODO confirm against the definition.  Paired with ipa_endpoint_exit().
 */
108 u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
109 const struct ipa_gsi_endpoint_data *data);
110 void ipa_endpoint_exit(struct ipa *ipa);
/* Complete or release a transaction on an endpoint.
 * NOTE(review): parameter renamed "ipa" -> "endpoint": it is a
 * struct ipa_endpoint pointer, and every other prototype in this
 * header names such a parameter "endpoint".  Declaration-only
 * change; callers and the definitions are unaffected.
 */
112 void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
113 struct gsi_trans *trans);
114 void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
115 struct gsi_trans *trans);
117 #endif /* _IPA_ENDPOINT_H_ */