// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
#include <linux/if_rmnet.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_data.h"
#include "ipa_endpoint.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
#include "ipa_modem.h"
#include "ipa_table.h"
#include "ipa_gsi.h"

#define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)

#define IPA_REPLENISH_BATCH	16

/* RX buffer is 1 page (or a power-of-2 contiguous pages) */
#define IPA_RX_BUFFER_SIZE	8192	/* PAGE_SIZE > 4096 wastes a LOT */

/* The amount of RX buffer space consumed by standard skb overhead */
#define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))

/* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
#define IPA_ENDPOINT_QMAP_METADATA_MASK		0x000000ff /* host byte order */

#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3
#define IPA_AGGR_TIME_LIMIT_DEFAULT		1000	/* microseconds */

/** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode {
	IPA_STATUS_OPCODE_PACKET		= 0x01,
	IPA_STATUS_OPCODE_NEW_FRAG_RULE		= 0x02,
	IPA_STATUS_OPCODE_DROPPED_PACKET	= 0x04,
	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 0x08,
	IPA_STATUS_OPCODE_LOG			= 0x10,
	IPA_STATUS_OPCODE_DCMP			= 0x20,
	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 0x40,
};

/** enum ipa_status_exception - status element exception type */
enum ipa_status_exception {
	/* 0 means no exception */
	IPA_STATUS_EXCEPTION_DEAGGR		= 0x01,
	IPA_STATUS_EXCEPTION_IPTYPE		= 0x04,
	IPA_STATUS_EXCEPTION_PACKET_LENGTH	= 0x08,
	IPA_STATUS_EXCEPTION_FRAG_RULE_MISS	= 0x10,
	IPA_STATUS_EXCEPTION_SW_FILT		= 0x20,
	/* The meaning of the next value depends on the IP version (IPv4 or IPv6) */
	IPA_STATUS_EXCEPTION_NAT		= 0x40,		/* IPv4 */
	IPA_STATUS_EXCEPTION_IPV6CT		= IPA_STATUS_EXCEPTION_NAT,
};

/* Status element provided by hardware */
struct ipa_status {
	u8 opcode;		/* enum ipa_status_opcode */
	u8 exception;		/* enum ipa_status_exception */
	__le16 mask;
	__le16 pkt_len;
	u8 endp_src_idx;
	u8 endp_dst_idx;
	__le32 metadata;
	__le32 flags1;
	__le64 flags2;
	__le32 flags3;
	__le32 flags4;
};

/* Field masks for struct ipa_status structure fields */

#define IPA_STATUS_SRC_IDX_FMASK		GENMASK(4, 0)

#define IPA_STATUS_DST_IDX_FMASK		GENMASK(4, 0)

#define IPA_STATUS_FLAGS1_FLT_LOCAL_FMASK	GENMASK(0, 0)
#define IPA_STATUS_FLAGS1_FLT_HASH_FMASK	GENMASK(1, 1)
#define IPA_STATUS_FLAGS1_FLT_GLOBAL_FMASK	GENMASK(2, 2)
#define IPA_STATUS_FLAGS1_FLT_RET_HDR_FMASK	GENMASK(3, 3)
#define IPA_STATUS_FLAGS1_FLT_RULE_ID_FMASK	GENMASK(13, 4)
#define IPA_STATUS_FLAGS1_RT_LOCAL_FMASK	GENMASK(14, 14)
#define IPA_STATUS_FLAGS1_RT_HASH_FMASK		GENMASK(15, 15)
#define IPA_STATUS_FLAGS1_UCP_FMASK		GENMASK(16, 16)
#define IPA_STATUS_FLAGS1_RT_TBL_IDX_FMASK	GENMASK(21, 17)
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK	GENMASK(31, 22)

#define IPA_STATUS_FLAGS2_NAT_HIT_FMASK		GENMASK_ULL(0, 0)
#define IPA_STATUS_FLAGS2_NAT_ENTRY_IDX_FMASK	GENMASK_ULL(13, 1)
#define IPA_STATUS_FLAGS2_NAT_TYPE_FMASK	GENMASK_ULL(15, 14)
#define IPA_STATUS_FLAGS2_TAG_INFO_FMASK	GENMASK_ULL(63, 16)

#define IPA_STATUS_FLAGS3_SEQ_NUM_FMASK		GENMASK(7, 0)
#define IPA_STATUS_FLAGS3_TOD_CTR_FMASK		GENMASK(31, 8)

#define IPA_STATUS_FLAGS4_HDR_LOCAL_FMASK	GENMASK(0, 0)
#define IPA_STATUS_FLAGS4_HDR_OFFSET_FMASK	GENMASK(10, 1)
#define IPA_STATUS_FLAGS4_FRAG_HIT_FMASK	GENMASK(11, 11)
#define IPA_STATUS_FLAGS4_FRAG_RULE_FMASK	GENMASK(15, 12)
#define IPA_STATUS_FLAGS4_HW_SPECIFIC_FMASK	GENMASK(31, 16)

#ifdef IPA_VALIDATE

static void ipa_endpoint_validate_build(void)
{
	/* The aggregation byte limit defines the point at which an
	 * aggregation window will close. It is programmed into the
	 * IPA hardware as a number of KB. We don't use "hard byte
	 * limit" aggregation, which means that we need to supply
	 * enough space in a receive buffer to hold a complete MTU
	 * plus normal skb overhead *after* that aggregation byte
	 * limit has been crossed.
	 *
	 * This check just ensures we don't define a receive buffer
	 * size that would exceed what we can represent in the field
	 * that is used to program its size.
	 */
	BUILD_BUG_ON(IPA_RX_BUFFER_SIZE >
		     field_max(AGGR_BYTE_LIMIT_FMASK) * SZ_1K +
		     IPA_MTU + IPA_RX_BUFFER_OVERHEAD);

	/* I honestly don't know where this requirement comes from. But
	 * it holds, and if we someday need to loosen the constraint we
	 * can try to track it down.
	 */
	BUILD_BUG_ON(sizeof(struct ipa_status) % 4);
}

static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
			    const struct ipa_gsi_endpoint_data *all_data,
			    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *other_data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name other_name;

	if (ipa_gsi_endpoint_data_empty(data))
		return true;

	if (!data->toward_ipa) {
		if (data->endpoint.filter_support) {
			dev_err(dev, "filtering not supported for "
					"RX endpoint %u\n",
				data->endpoint_id);
			return false;
		}

		return true;	/* Nothing more to check for RX */
	}

	if (data->endpoint.config.status_enable) {
		other_name = data->endpoint.config.tx.status_endpoint;
		if (other_name >= count) {
			dev_err(dev, "status endpoint name %u out of range "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* Status endpoint must be defined... */
		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "DMA endpoint name %u undefined "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* ...and has to be an RX endpoint... */
		if (other_data->toward_ipa) {
			dev_err(dev,
				"status endpoint for endpoint %u not RX\n",
				data->endpoint_id);
			return false;
		}

		/* ...and if it's to be an AP endpoint... */
		if (other_data->ee_id == GSI_EE_AP) {
			/* ...make sure it has status enabled. */
			if (!other_data->endpoint.config.status_enable) {
				dev_err(dev,
					"status not enabled for endpoint %u\n",
					other_data->endpoint_id);
				return false;
			}
		}
	}

	if (data->endpoint.config.dma_mode) {
		other_name = data->endpoint.config.dma_endpoint;
		if (other_name >= count) {
			dev_err(dev, "DMA endpoint name %u out of range "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "DMA endpoint name %u undefined "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}
	}

	return true;
}

static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
				    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *dp = data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name name;

	ipa_endpoint_validate_build();

	if (count > IPA_ENDPOINT_COUNT) {
		dev_err(dev, "too many endpoints specified (%u > %u)\n",
			count, IPA_ENDPOINT_COUNT);
		return false;
	}

	/* Make sure needed endpoints have defined data */
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
		dev_err(dev, "command TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
		dev_err(dev, "LAN RX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
		dev_err(dev, "AP->modem TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
		dev_err(dev, "AP<-modem RX endpoint not defined\n");
		return false;
	}

	for (name = 0; name < count; name++, dp++)
		if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
			return false;

	return true;
}

#else /* !IPA_VALIDATE */

static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
				    const struct ipa_gsi_endpoint_data *data)
{
	return true;
}

#endif /* !IPA_VALIDATE */

/* Allocate a transaction to use on a non-command endpoint */
static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
						  u32 tre_count)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;
	enum dma_data_direction direction;

	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
}

/* suspend_delay represents suspend for RX, delay for TX endpoints.
 * Note that suspend is not supported starting with IPA v4.0.
 */
static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
	u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	bool state;
	u32 mask;
	u32 val;

	/* Suspend is not supported for IPA v4.0+. Delay doesn't work
	 * correctly on IPA v4.2.
	 *
	 * if (endpoint->toward_ipa)
	 *	assert(ipa->version != IPA_VERSION_4_2);
	 * else
	 *	assert(ipa->version == IPA_VERSION_3_5_1);
	 */
	mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;

	val = ioread32(ipa->reg_virt + offset);
	/* Don't bother if it's already in the requested state */
	state = !!(val & mask);
	if (suspend_delay != state) {
		val ^= mask;
		iowrite32(val, ipa->reg_virt + offset);
	}

	return state;
}

/* We currently don't care what the previous state was for delay mode */
static void
ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
{
	/* assert(endpoint->toward_ipa); */

	(void)ipa_endpoint_init_ctrl(endpoint, enable);
}

/* Returns previous suspend state (true means it was enabled) */
static bool
ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
{
	/* assert(!endpoint->toward_ipa); */

	return ipa_endpoint_init_ctrl(endpoint, enable);
}

/* Enable or disable delay or suspend mode on all modem endpoints */
void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
{
	bool support_suspend;
	u32 endpoint_id;

	/* DELAY mode doesn't work correctly on IPA v4.2 */
	if (ipa->version == IPA_VERSION_4_2)
		return;

	/* Only IPA v3.5.1 supports SUSPEND mode on RX endpoints */
	support_suspend = ipa->version == IPA_VERSION_3_5_1;

	for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];

		if (endpoint->ee_id != GSI_EE_MODEM)
			continue;

		/* Set TX delay mode, or for IPA v3.5.1 RX suspend mode */
		if (endpoint->toward_ipa)
			ipa_endpoint_program_delay(endpoint, enable);
		else if (support_suspend)
			(void)ipa_endpoint_program_suspend(endpoint, enable);
	}
}

/* Reset all modem endpoints to use the default exception endpoint */
int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;
	struct gsi_trans *trans;
	u32 count;

	/* We need one command per modem TX endpoint. We can get an upper
	 * bound on that by assuming all initialized endpoints are modem->IPA.
	 * That won't happen, and we could be more precise, but this is fine
	 * for now. We need to end the transaction with a "tag process."
	 */
	count = hweight32(initialized) + ipa_cmd_tag_process_count();
	trans = ipa_cmd_trans_alloc(ipa, count);
	if (!trans) {
		dev_err(&ipa->pdev->dev,
			"no transaction to reset modem exception endpoints\n");
		return -EBUSY;
	}

	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;
		u32 offset;

		initialized ^= BIT(endpoint_id);

		/* We only reset modem TX endpoints */
		endpoint = &ipa->endpoint[endpoint_id];
		if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
			continue;

		offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

		/* Value written is 0, and all bits are updated. That
		 * means status is disabled on the endpoint, and as a
		 * result all other fields in the register are ignored.
		 */
		ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
	}

	ipa_cmd_tag_process_add(trans);

	/* XXX This should have a 1 second timeout */
	gsi_trans_commit_wait(trans);

	return 0;
}

static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	/* FRAG_OFFLOAD_EN is 0 */
	if (endpoint->data->checksum) {
		if (endpoint->toward_ipa) {
			u32 checksum_offset;

			val |= u32_encode_bits(IPA_CS_OFFLOAD_UL,
					       CS_OFFLOAD_EN_FMASK);
			/* Checksum header offset is in 4-byte units */
			checksum_offset = sizeof(struct rmnet_map_header);
			checksum_offset /= sizeof(u32);
			val |= u32_encode_bits(checksum_offset,
					       CS_METADATA_HDR_OFFSET_FMASK);
		} else {
			val |= u32_encode_bits(IPA_CS_OFFLOAD_DL,
					       CS_OFFLOAD_EN_FMASK);
		}
	} else {
		val |= u32_encode_bits(IPA_CS_OFFLOAD_NONE,
				       CS_OFFLOAD_EN_FMASK);
	}
	/* CS_GEN_QMB_MASTER_SEL is 0 */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
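
/* Note: given the 4-byte struct rmnet_map_header from <linux/if_rmnet.h>, the
 * checksum_offset computed in ipa_endpoint_init_cfg() above works out to 1,
 * i.e. the UL checksum metadata header begins one 32-bit word into the
 * header area.
 */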

/**
 * We program QMAP endpoints so each packet received is preceded by a QMAP
 * header structure. The QMAP header contains a 1-byte mux_id and 2-byte
 * packet size field, and we have the IPA hardware populate both for each
 * received packet. The header is configured (in the HDR_EXT register)
 * to use big endian format.
 *
 * The packet size is written into the QMAP header's pkt_len field. That
 * location is defined here using the HDR_OFST_PKT_SIZE field.
 *
 * The mux_id comes from a 4-byte metadata value supplied with each packet
 * by the modem. It is *not* a QMAP header, but it does contain the mux_id
 * value that we want, in its low-order byte. A bitmask defined in the
 * endpoint's METADATA_MASK register defines which byte within the modem
 * metadata contains the mux_id. And the OFST_METADATA field programmed
 * here indicates where the extracted byte should be placed within the QMAP
 * header.
 */
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (endpoint->data->qmap) {
		size_t header_size = sizeof(struct rmnet_map_header);

		/* We might supply a checksum header after the QMAP header */
		if (endpoint->toward_ipa && endpoint->data->checksum)
			header_size += sizeof(struct rmnet_map_ul_csum_header);
		val |= u32_encode_bits(header_size, HDR_LEN_FMASK);

		/* Define how to fill mux_id in a received QMAP header */
		if (!endpoint->toward_ipa) {
			u32 off;	/* Field offset within header */

			/* Where IPA will write the metadata value */
			off = offsetof(struct rmnet_map_header, mux_id);
			val |= u32_encode_bits(off, HDR_OFST_METADATA_FMASK);

			/* Where IPA will write the length */
			off = offsetof(struct rmnet_map_header, pkt_len);
			val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
			val |= u32_encode_bits(off, HDR_OFST_PKT_SIZE_FMASK);
		}
		/* For QMAP TX, metadata offset is 0 (modem assumes this) */
		val |= HDR_OFST_METADATA_VALID_FMASK;

		/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
		/* HDR_A5_MUX is 0 */
		/* HDR_LEN_INC_DEAGG_HDR is 0 */
		/* HDR_METADATA_REG_VALID is 0 (TX only) */
	}

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
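
/* Assuming the generic struct rmnet_map_header layout from <linux/if_rmnet.h>,
 * mux_id sits at byte offset 1 and pkt_len at byte offset 2, so for an RX QMAP
 * endpoint the function above programs HDR_OFST_METADATA = 1 and
 * HDR_OFST_PKT_SIZE = 2 within a 4-byte header (8 bytes on TX when a checksum
 * header follows the QMAP header).
 */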

static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
	u32 pad_align = endpoint->data->rx.pad_align;
	u32 val = 0;

	val |= HDR_ENDIANNESS_FMASK;		/* big endian */
	val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
	/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
	/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
	if (!endpoint->toward_ipa)
		val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 val = 0;
	u32 offset;

	offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);

	/* Note that HDR_ENDIANNESS indicates big endian header fields */
	if (!endpoint->toward_ipa && endpoint->data->qmap)
		val = cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
	u32 val;

	if (endpoint->toward_ipa && endpoint->data->dma_mode) {
		enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
		u32 dma_endpoint_id;

		dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;

		val = u32_encode_bits(IPA_DMA, MODE_FMASK);
		val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
	} else {
		val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
	}
	/* Other bitfields unspecified (and 0) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* Compute the aggregation size value to use for a given buffer size */
static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
{
	/* We don't use "hard byte limit" aggregation, so we define the
	 * aggregation limit such that our buffer has enough space *after*
	 * that limit to receive a full MTU of data, plus overhead.
	 */
	rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;

	return rx_buffer_size / SZ_1K;
}
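
/* As a rough illustration (the exact numbers depend on IPA_MTU and the
 * page-size-derived IPA_RX_BUFFER_OVERHEAD): with the 8192-byte receive
 * buffer defined above, a 1500-byte MTU and a few hundred bytes of skb
 * overhead, roughly 6 KB remain, so the aggregation byte limit would be
 * programmed as 6.
 */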

static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (endpoint->data->aggregation) {
		if (!endpoint->toward_ipa) {
			u32 aggr_size = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
			u32 limit;

			val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
			val |= u32_encode_bits(aggr_size,
					       AGGR_BYTE_LIMIT_FMASK);
			limit = IPA_AGGR_TIME_LIMIT_DEFAULT;
			val |= u32_encode_bits(limit / IPA_AGGR_GRANULARITY,
					       AGGR_TIME_LIMIT_FMASK);
			val |= u32_encode_bits(0, AGGR_PKT_LIMIT_FMASK);
			if (endpoint->data->rx.aggr_close_eof)
				val |= AGGR_SW_EOF_ACTIVE_FMASK;
			/* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
		} else {
			val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
					       AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
			/* other fields ignored */
		}
		/* AGGR_FORCE_CLOSE is 0 */
	} else {
		val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
		/* other fields ignored */
	}

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* A return value of 0 indicates an error */
static u32 ipa_reg_init_hol_block_timer_val(struct ipa *ipa, u32 microseconds)
{
	u32 scale;
	u32 base;
	u32 val;

	if (!microseconds)
		return 0;	/* invalid delay */

	/* Timer is represented in units of clock ticks. */
	if (ipa->version < IPA_VERSION_4_2)
		return microseconds;	/* XXX Needs to be computed */

	/* IPA v4.2 represents the tick count as base * scale */
	scale = 1;			/* XXX Needs to be computed */
	if (scale > field_max(SCALE_FMASK))
		return 0;		/* scale too big */

	base = DIV_ROUND_CLOSEST(microseconds, scale);
	if (base > field_max(BASE_VALUE_FMASK))
		return 0;		/* microseconds too big */

	val = u32_encode_bits(scale, SCALE_FMASK);
	val |= u32_encode_bits(base, BASE_VALUE_FMASK);

	return val;
}

static int ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
					     u32 microseconds)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	/* XXX We'll fix this when the register definition is clear */
	if (microseconds) {
		struct device *dev = &ipa->pdev->dev;

		dev_err(dev, "endpoint %u non-zero HOLB period (ignoring)\n",
			endpoint_id);
		microseconds = 0;
	}

	if (microseconds) {
		val = ipa_reg_init_hol_block_timer_val(ipa, microseconds);
		if (!val)
			return -EINVAL;
	} else {
		val = 0;	/* timeout is immediate */
	}
	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
	iowrite32(val, ipa->reg_virt + offset);

	return 0;
}

static void
ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 offset;
	u32 val;

	val = u32_encode_bits(enable ? 1 : 0, HOL_BLOCK_EN_FMASK);
	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
{
	u32 i;

	for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[i];

		if (endpoint->ee_id != GSI_EE_MODEM)
			continue;

		(void)ipa_endpoint_init_hol_block_timer(endpoint, 0);
		ipa_endpoint_init_hol_block_enable(endpoint, true);
	}
}

static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	/* DEAGGR_HDR_LEN is 0 */
	/* PACKET_OFFSET_VALID is 0 */
	/* PACKET_OFFSET_LOCATION is ignored (not valid) */
	/* MAX_PACKET_LEN is 0 (not enforced) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
	u32 seq_type = endpoint->seq_type;
	u32 val = 0;

	val |= u32_encode_bits(seq_type & 0xf, HPS_SEQ_TYPE_FMASK);
	val |= u32_encode_bits((seq_type >> 4) & 0xf, DPS_SEQ_TYPE_FMASK);
	/* HPS_REP_SEQ_TYPE is 0 */
	/* DPS_REP_SEQ_TYPE is 0 */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
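
/* The low nibble of seq_type selects the HPS sequencer type and the next
 * nibble the DPS type; a (hypothetical) seq_type of 0x25 would therefore
 * program HPS type 0x5 and DPS type 0x2.
 */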

/**
 * ipa_endpoint_skb_tx() - Transmit a socket buffer
 * @endpoint:	Endpoint pointer
 * @skb:	Socket buffer to send
 *
 * Returns:	0 if successful, or a negative error code
 */
int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
{
	struct gsi_trans *trans;
	u32 nr_frags;
	int ret;

	/* Make sure source endpoint's TLV FIFO has enough entries to
	 * hold the linear portion of the skb and all its fragments.
	 * If not, see if we can linearize it before giving up.
	 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (1 + nr_frags > endpoint->trans_tre_max) {
		if (skb_linearize(skb))
			return -E2BIG;
		nr_frags = 0;
	}

	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
	if (!trans)
		return -EBUSY;

	ret = gsi_trans_skb_add(trans, skb);
	if (ret)
		goto err_trans_free;
	trans->data = skb;	/* transaction owns skb now */

	gsi_trans_commit(trans, !netdev_xmit_more());

	return 0;

err_trans_free:
	gsi_trans_free(trans);

	return -ENOMEM;
}

static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;
	u32 offset;

	offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

	if (endpoint->data->status_enable) {
		val |= STATUS_EN_FMASK;
		if (endpoint->toward_ipa) {
			enum ipa_endpoint_name name;
			u32 status_endpoint_id;

			name = endpoint->data->tx.status_endpoint;
			status_endpoint_id = ipa->name_map[name]->endpoint_id;

			val |= u32_encode_bits(status_endpoint_id,
					       STATUS_ENDP_FMASK);
		}
		/* STATUS_LOCATION is 0 (status element precedes packet) */
		/* The next field is present for IPA v4.0 and above */
		/* STATUS_PKT_SUPPRESS_FMASK is 0 */
	}

	iowrite32(val, ipa->reg_virt + offset);
}

static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
{
	struct gsi_trans *trans;
	bool doorbell = false;
	struct page *page;
	u32 offset;
	u32 len;
	int ret;

	page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE));
	if (!page)
		return -ENOMEM;

	trans = ipa_endpoint_trans_alloc(endpoint, 1);
	if (!trans)
		goto err_free_pages;

	/* Offset the buffer to make space for skb headroom */
	offset = NET_SKB_PAD;
	len = IPA_RX_BUFFER_SIZE - offset;

	ret = gsi_trans_page_add(trans, page, len, offset);
	if (ret)
		goto err_trans_free;
	trans->data = page;	/* transaction owns page now */

	if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) {
		doorbell = true;
		endpoint->replenish_ready = 0;
	}

	gsi_trans_commit(trans, doorbell);

	return 0;

err_trans_free:
	gsi_trans_free(trans);
err_free_pages:
	__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));

	return -ENOMEM;
}

/**
 * ipa_endpoint_replenish() - Replenish the Rx packets cache.
 *
 * Allocate RX packet wrapper structures with maximal socket buffers
 * for an endpoint. These are supplied to the hardware, which fills
 * them with incoming data.
 */
static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count)
{
	struct gsi *gsi;
	u32 backlog;

	if (!endpoint->replenish_enabled) {
		if (count)
			atomic_add(count, &endpoint->replenish_saved);
		return;
	}

	while (atomic_dec_not_zero(&endpoint->replenish_backlog))
		if (ipa_endpoint_replenish_one(endpoint))
			goto try_again_later;
	if (count)
		atomic_add(count, &endpoint->replenish_backlog);

	return;

try_again_later:
	/* The last one didn't succeed, so fix the backlog */
	backlog = atomic_inc_return(&endpoint->replenish_backlog);

	if (count)
		atomic_add(count, &endpoint->replenish_backlog);

	/* Whenever a receive buffer transaction completes we'll try to
	 * replenish again. It's unlikely, but if we fail to supply even
	 * one buffer, nothing will trigger another replenish attempt.
	 * Receive buffer transactions use one TRE, so schedule work to
	 * try replenishing again if our backlog is *all* available TREs.
	 */
	gsi = &endpoint->ipa->gsi;
	if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
		schedule_delayed_work(&endpoint->replenish_work,
				      msecs_to_jiffies(1));
}
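
/* In the scheme above, replenish_backlog counts receive buffers the hardware
 * still needs, while replenish_saved accumulates that count whenever
 * replenishing is disabled; ipa_endpoint_replenish_enable() below moves the
 * saved count back into the backlog before trying to refill.
 */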

static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 max_backlog;
	u32 saved;

	endpoint->replenish_enabled = true;
	while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
		atomic_add(saved, &endpoint->replenish_backlog);

	/* Start replenishing if hardware currently has no buffers */
	max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
	if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
		ipa_endpoint_replenish(endpoint, 0);
}

static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
{
	u32 backlog;

	endpoint->replenish_enabled = false;
	while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
		atomic_add(backlog, &endpoint->replenish_saved);
}

static void ipa_endpoint_replenish_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct ipa_endpoint *endpoint;

	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);

	ipa_endpoint_replenish(endpoint, 0);
}

static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
				  void *data, u32 len, u32 extra)
{
	struct sk_buff *skb;

	skb = __dev_alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		skb_put(skb, len);
		memcpy(skb->data, data, len);
		skb->truesize += extra;
	}

	/* Now receive it, or drop it if there's no netdev */
	if (endpoint->netdev)
		ipa_modem_skb_rx(endpoint->netdev, skb);
	else if (skb)
		dev_kfree_skb_any(skb);
}

static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
				   struct page *page, u32 len)
{
	struct sk_buff *skb;

	/* Nothing to do if there's no netdev */
	if (!endpoint->netdev)
		return false;

	/* assert(len <= SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE-NET_SKB_PAD)); */
	skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
	if (skb) {
		/* Reserve the headroom and account for the data */
		skb_reserve(skb, NET_SKB_PAD);
		skb_put(skb, len);
	}

	/* Receive the buffer (or record drop if unable to build it) */
	ipa_modem_skb_rx(endpoint->netdev, skb);

	return skb != NULL;
}

/* The format of a packet status element is the same for several status
 * types (opcodes). The NEW_FRAG_RULE, LOG, DCMP (decompression) types
 * aren't currently supported.
 */
static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
{
	switch (opcode) {
	case IPA_STATUS_OPCODE_PACKET:
	case IPA_STATUS_OPCODE_DROPPED_PACKET:
	case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
	case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
		return true;
	default:
		return false;
	}
}

static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
				     const struct ipa_status *status)
{
	u32 endpoint_id;

	if (!ipa_status_format_packet(status->opcode))
		return true;
	if (!status->pkt_len)
		return true;
	endpoint_id = u32_get_bits(status->endp_dst_idx,
				   IPA_STATUS_DST_IDX_FMASK);
	if (endpoint_id != endpoint->endpoint_id)
		return true;

	return false;	/* Don't skip this packet, process it */
}

/* Return whether the status indicates the packet should be dropped */
static bool ipa_status_drop_packet(const struct ipa_status *status)
{
	u32 val;

	/* Deaggregation exceptions we drop; others we consume */
	if (status->exception)
		return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;

	/* Drop the packet if it fails to match a routing rule; otherwise no */
	val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);

	return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
}
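
/* A routing rule ID of all ones (field_max() of the 10-bit RT_RULE_ID field,
 * i.e. 0x3ff) is how a routing rule miss shows up in the status element,
 * which is why such packets are dropped above.
 */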

static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
				      struct page *page, u32 total_len)
{
	void *data = page_address(page) + NET_SKB_PAD;
	u32 unused = IPA_RX_BUFFER_SIZE - total_len;
	u32 resid = total_len;

	while (resid) {
		const struct ipa_status *status = data;
		u32 align;
		u32 len;

		if (resid < sizeof(*status)) {
			dev_err(&endpoint->ipa->pdev->dev,
				"short message (%u bytes < %zu byte status)\n",
				resid, sizeof(*status));
			break;
		}

		/* Skip over status packets that lack packet data */
		if (ipa_endpoint_status_skip(endpoint, status)) {
			data += sizeof(*status);
			resid -= sizeof(*status);
			continue;
		}

		/* Compute the amount of buffer space consumed by the
		 * packet, including the status element. If the hardware
		 * is configured to pad packet data to an aligned boundary,
		 * account for that. And if checksum offload is enabled
		 * a trailer containing computed checksum information will
		 * be appended.
		 */
		align = endpoint->data->rx.pad_align ? : 1;
		len = le16_to_cpu(status->pkt_len);
		len = sizeof(*status) + ALIGN(len, align);
		if (endpoint->data->checksum)
			len += sizeof(struct rmnet_map_dl_csum_trailer);

		/* Charge the new packet with a proportional fraction of
		 * the unused space in the original receive buffer.
		 * XXX Charge a proportion of the *whole* receive buffer?
		 */
		if (!ipa_status_drop_packet(status)) {
			u32 extra = unused * len / total_len;
			void *data2 = data + sizeof(*status);
			u32 len2 = le16_to_cpu(status->pkt_len);

			/* Client receives only packet data (no status) */
			ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
		}

		/* Consume status and the full packet it describes */
		data += len;
		resid -= len;
	}
}

/* Complete a TX transaction, command or from ipa_endpoint_skb_tx() */
static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint,
				     struct gsi_trans *trans)
{
}

/* Complete transaction initiated in ipa_endpoint_replenish_one() */
static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
				     struct gsi_trans *trans)
{
	struct page *page;

	ipa_endpoint_replenish(endpoint, 1);

	if (trans->cancelled)
		return;

	/* Parse or build a socket buffer using the actual received length */
	page = trans->data;
	if (endpoint->data->status_enable)
		ipa_endpoint_status_parse(endpoint, page, trans->len);
	else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
		trans->data = NULL;	/* Pages have been consumed */
}

void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
				 struct gsi_trans *trans)
{
	if (endpoint->toward_ipa)
		ipa_endpoint_tx_complete(endpoint, trans);
	else
		ipa_endpoint_rx_complete(endpoint, trans);
}

void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
				struct gsi_trans *trans)
{
	if (endpoint->toward_ipa) {
		struct ipa *ipa = endpoint->ipa;

		/* Nothing to do for command transactions */
		if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
			struct sk_buff *skb = trans->data;

			if (skb)
				dev_kfree_skb_any(skb);
		}
	} else {
		struct page *page = trans->data;

		if (page)
			__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
	}
}

void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
{
	u32 val;

	/* ROUTE_DIS is 0 */
	val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
	val |= ROUTE_DEF_HDR_TABLE_FMASK;
	val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
	val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
	val |= ROUTE_DEF_RETAIN_HDR_FMASK;

	iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
}

void ipa_endpoint_default_route_clear(struct ipa *ipa)
{
	ipa_endpoint_default_route_set(ipa, 0);
}

static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	/* assert(mask & ipa->available); */
	offset = ipa_reg_state_aggr_active_offset(ipa->version);
	val = ioread32(ipa->reg_virt + offset);

	return !!(val & mask);
}

static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;

	/* assert(mask & ipa->available); */
	iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
}

/**
 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
 * @endpoint:	Endpoint to be reset
 *
 * If aggregation is active on an RX endpoint when a reset is performed
 * on its underlying GSI channel, a special sequence of actions must be
 * taken to ensure the IPA pipeline is properly cleared.
 *
 * Return:	0 if successful, or a negative error code
 */
static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	bool suspended = false;
	dma_addr_t addr;
	bool legacy;
	u32 retries;
	u32 len = 1;
	void *virt;
	int ret;

	virt = kzalloc(len, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr)) {
		ret = -ENOMEM;
		goto out_kfree;
	}

	/* Force close aggregation before issuing the reset */
	ipa_endpoint_force_close(endpoint);

	/* Reset and reconfigure the channel with the doorbell engine
	 * disabled. Then poll until we know aggregation is no longer
	 * active. We'll re-enable the doorbell (if appropriate) when
	 * we reset again below.
	 */
	gsi_channel_reset(gsi, endpoint->channel_id, false);

	/* Make sure the channel isn't suspended */
	if (endpoint->ipa->version == IPA_VERSION_3_5_1)
		suspended = ipa_endpoint_program_suspend(endpoint, false);

	/* Start channel and do a 1 byte read */
	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
	if (ret)
		goto err_endpoint_stop;

	/* Wait for aggregation to be closed on the channel */
	retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
	do {
		if (!ipa_endpoint_aggr_active(endpoint))
			break;
		msleep(1);
	} while (retries--);

	/* Check one last time */
	if (ipa_endpoint_aggr_active(endpoint))
		dev_err(dev, "endpoint %u still active during reset\n",
			endpoint->endpoint_id);

	gsi_trans_read_byte_done(gsi, endpoint->channel_id);

	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	/* Finally, reset and reconfigure the channel again (re-enabling
	 * the doorbell engine if appropriate). Sleep for 1 millisecond to
	 * complete the channel reset sequence. Finish by suspending the
	 * channel again (if necessary).
	 */
	legacy = ipa->version == IPA_VERSION_3_5_1;
	gsi_channel_reset(gsi, endpoint->channel_id, legacy);

	msleep(1);

	goto out_suspend_again;

err_endpoint_stop:
	(void)gsi_channel_stop(gsi, endpoint->channel_id);
out_suspend_again:
	if (suspended)
		(void)ipa_endpoint_program_suspend(endpoint, true);
	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
out_kfree:
	kfree(virt);

	return ret;
}

static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
{
	u32 channel_id = endpoint->channel_id;
	struct ipa *ipa = endpoint->ipa;
	bool special;
	bool legacy;
	int ret = 0;

	/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
	 * is active, we need to handle things specially to recover.
	 * All other cases just need to reset the underlying GSI channel.
	 *
	 * IPA v3.5.1 enables the doorbell engine. Newer versions do not.
	 */
	legacy = ipa->version == IPA_VERSION_3_5_1;
	special = !endpoint->toward_ipa && endpoint->data->aggregation;
	if (special && ipa_endpoint_aggr_active(endpoint))
		ret = ipa_endpoint_reset_rx_aggr(endpoint);
	else
		gsi_channel_reset(&ipa->gsi, channel_id, legacy);

	if (ret)
		dev_err(&ipa->pdev->dev,
			"error %d resetting channel %u for endpoint %u\n",
			ret, endpoint->channel_id, endpoint->endpoint_id);
}

static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
{
	if (endpoint->toward_ipa) {
		if (endpoint->ipa->version != IPA_VERSION_4_2)
			ipa_endpoint_program_delay(endpoint, false);
		ipa_endpoint_init_hdr_ext(endpoint);
		ipa_endpoint_init_aggr(endpoint);
		ipa_endpoint_init_deaggr(endpoint);
		ipa_endpoint_init_seq(endpoint);
	} else {
		if (endpoint->ipa->version == IPA_VERSION_3_5_1)
			(void)ipa_endpoint_program_suspend(endpoint, false);
		ipa_endpoint_init_hdr_ext(endpoint);
		ipa_endpoint_init_aggr(endpoint);
	}
	ipa_endpoint_init_cfg(endpoint);
	ipa_endpoint_init_hdr(endpoint);
	ipa_endpoint_init_hdr_metadata_mask(endpoint);
	ipa_endpoint_init_mode(endpoint);
	ipa_endpoint_status(endpoint);
}

int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	int ret;

	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret) {
		dev_err(&ipa->pdev->dev,
			"error %d starting %cX channel %u for endpoint %u\n",
			ret, endpoint->toward_ipa ? 'T' : 'R',
			endpoint->channel_id, endpoint->endpoint_id);
		return ret;
	}

	if (!endpoint->toward_ipa) {
		ipa_interrupt_suspend_enable(ipa->interrupt,
					     endpoint->endpoint_id);
		ipa_endpoint_replenish_enable(endpoint);
	}

	ipa->enabled |= BIT(endpoint->endpoint_id);

	return 0;
}

void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	int ret;

	if (!(ipa->enabled & mask))
		return;

	ipa->enabled ^= mask;

	if (!endpoint->toward_ipa) {
		ipa_endpoint_replenish_disable(endpoint);
		ipa_interrupt_suspend_disable(ipa->interrupt,
					      endpoint->endpoint_id);
	}

	/* Note that if stop fails, the channel's state is not well-defined */
	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
		dev_err(&ipa->pdev->dev,
			"error %d attempting to stop endpoint %u\n", ret,
			endpoint->endpoint_id);
}

/**
 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
 * @endpoint:	Endpoint on which to emulate a suspend
 *
 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
 * with an open aggregation frame. This is to work around a hardware
 * issue in IPA version 3.5.1 where the suspend interrupt will not be
 * generated when it should be.
 */
static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;

	/* assert(ipa->version == IPA_VERSION_3_5_1); */

	if (!endpoint->data->aggregation)
		return;

	/* Nothing to do if the endpoint doesn't have aggregation open */
	if (!ipa_endpoint_aggr_active(endpoint))
		return;

	/* Force close aggregation */
	ipa_endpoint_force_close(endpoint);

	ipa_interrupt_simulate_suspend(ipa->interrupt);
}

void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	bool stop_channel;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;

	if (!endpoint->toward_ipa)
		ipa_endpoint_replenish_disable(endpoint);

	/* IPA v3.5.1 doesn't use channel stop for suspend */
	stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
	if (!endpoint->toward_ipa && !stop_channel) {
		/* Due to a hardware bug, a client suspended with an open
		 * aggregation frame will not generate a SUSPEND IPA
		 * interrupt. We work around this by force-closing the
		 * aggregation frame, then simulating the arrival of such
		 * an interrupt.
		 */
		(void)ipa_endpoint_program_suspend(endpoint, true);
		ipa_endpoint_suspend_aggr(endpoint);
	}

	ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel);
	if (ret)
		dev_err(dev, "error %d suspending channel %u\n", ret,
			endpoint->channel_id);
}

void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	bool start_channel;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;

	/* IPA v3.5.1 doesn't use channel start for resume */
	start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
	if (!endpoint->toward_ipa && !start_channel)
		(void)ipa_endpoint_program_suspend(endpoint, false);

	ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel);
	if (ret)
		dev_err(dev, "error %d resuming channel %u\n", ret,
			endpoint->channel_id);
	else if (!endpoint->toward_ipa)
		ipa_endpoint_replenish_enable(endpoint);
}

void ipa_endpoint_suspend(struct ipa *ipa)
{
	if (ipa->modem_netdev)
		ipa_modem_suspend(ipa->modem_netdev);

	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
}

void ipa_endpoint_resume(struct ipa *ipa)
{
	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);

	if (ipa->modem_netdev)
		ipa_modem_resume(ipa->modem_netdev);
}

static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;

	/* Only AP endpoints get set up */
	if (endpoint->ee_id != GSI_EE_AP)
		return;

	endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id);
	if (!endpoint->toward_ipa) {
		/* RX transactions require a single TRE, so the maximum
		 * backlog is the same as the maximum outstanding TREs.
		 */
		endpoint->replenish_enabled = false;
		atomic_set(&endpoint->replenish_saved,
			   gsi_channel_tre_max(gsi, endpoint->channel_id));
		atomic_set(&endpoint->replenish_backlog, 0);
		INIT_DELAYED_WORK(&endpoint->replenish_work,
				  ipa_endpoint_replenish_work);
	}

	ipa_endpoint_program(endpoint);

	endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
}

static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
{
	endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);

	if (!endpoint->toward_ipa)
		cancel_delayed_work_sync(&endpoint->replenish_work);

	ipa_endpoint_reset(endpoint);
}

void ipa_endpoint_setup(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;

	ipa->set_up = 0;
	while (initialized) {
		u32 endpoint_id = __ffs(initialized);

		initialized ^= BIT(endpoint_id);

		ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
	}
}

void ipa_endpoint_teardown(struct ipa *ipa)
{
	u32 set_up = ipa->set_up;

	while (set_up) {
		u32 endpoint_id = __fls(set_up);

		set_up ^= BIT(endpoint_id);

		ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
	}
	ipa->set_up = 0;
}

int ipa_endpoint_config(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	u32 initialized;
	u32 rx_base;
	u32 rx_mask;
	u32 tx_mask;
	int ret = 0;
	u32 max;
	u32 val;

	/* Find out about the endpoints supplied by the hardware, and ensure
	 * the highest one doesn't exceed the number we support.
	 */
	val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);

	/* Our RX is an IPA producer */
	rx_base = u32_get_bits(val, BAM_PROD_LOWEST_FMASK);
	max = rx_base + u32_get_bits(val, BAM_MAX_PROD_PIPES_FMASK);
	if (max > IPA_ENDPOINT_MAX) {
		dev_err(dev, "too many endpoints (%u > %u)\n",
			max, IPA_ENDPOINT_MAX);
		return -EINVAL;
	}
	rx_mask = GENMASK(max - 1, rx_base);

	/* Our TX is an IPA consumer */
	max = u32_get_bits(val, BAM_MAX_CONS_PIPES_FMASK);
	tx_mask = GENMASK(max - 1, 0);

	ipa->available = rx_mask | tx_mask;

	/* Check for initialized endpoints not supported by the hardware */
	if (ipa->initialized & ~ipa->available) {
		dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
			ipa->initialized & ~ipa->available);
		ret = -EINVAL;		/* Report other errors too */
	}

	initialized = ipa->initialized;
	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;

		initialized ^= BIT(endpoint_id);

		/* Make sure it's pointing in the right direction */
		endpoint = &ipa->endpoint[endpoint_id];
		if ((endpoint_id < rx_base) != !!endpoint->toward_ipa) {
			dev_err(dev, "endpoint id %u wrong direction\n",
				endpoint_id);
			ret = -EINVAL;
		}
	}

	return ret;
}

void ipa_endpoint_deconfig(struct ipa *ipa)
{
	ipa->available = 0;	/* Nothing more to do */
}

static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
				  const struct ipa_gsi_endpoint_data *data)
{
	struct ipa_endpoint *endpoint;

	endpoint = &ipa->endpoint[data->endpoint_id];

	if (data->ee_id == GSI_EE_AP)
		ipa->channel_map[data->channel_id] = endpoint;
	ipa->name_map[name] = endpoint;

	endpoint->ipa = ipa;
	endpoint->ee_id = data->ee_id;
	endpoint->seq_type = data->endpoint.seq_type;
	endpoint->channel_id = data->channel_id;
	endpoint->endpoint_id = data->endpoint_id;
	endpoint->toward_ipa = data->toward_ipa;
	endpoint->data = &data->endpoint.config;

	ipa->initialized |= BIT(endpoint->endpoint_id);
}

void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
{
	endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);

	memset(endpoint, 0, sizeof(*endpoint));
}

void ipa_endpoint_exit(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;

	while (initialized) {
		u32 endpoint_id = __fls(initialized);

		initialized ^= BIT(endpoint_id);

		ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
	}
	memset(ipa->name_map, 0, sizeof(ipa->name_map));
	memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
}

/* Returns a bitmask of endpoints that support filtering, or 0 on error */
u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
		      const struct ipa_gsi_endpoint_data *data)
{
	enum ipa_endpoint_name name;
	u32 filter_map;

	if (!ipa_endpoint_data_valid(ipa, count, data))
		return 0;	/* Error */

	ipa->initialized = 0;

	filter_map = 0;
	for (name = 0; name < count; name++, data++) {
		if (ipa_gsi_endpoint_data_empty(data))
			continue;	/* Skip over empty slots */

		ipa_endpoint_init_one(ipa, name, data);

		if (data->endpoint.filter_support)
			filter_map |= BIT(data->endpoint_id);
	}

	if (!ipa_filter_map_valid(ipa, filter_map))
		goto err_endpoint_exit;

	return filter_map;	/* Non-zero bitmask */

err_endpoint_exit:
	ipa_endpoint_exit(ipa);

	return 0;	/* Error */
}