// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
#include <linux/if_rmnet.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_data.h"
#include "ipa_endpoint.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
#include "ipa_modem.h"
#include "ipa_table.h"
#include "ipa_gsi.h"
#include "ipa_clock.h"

#define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)

#define IPA_REPLENISH_BATCH	16

/* RX buffer is 1 page (or a power-of-2 contiguous pages) */
#define IPA_RX_BUFFER_SIZE	8192	/* PAGE_SIZE > 4096 wastes a LOT */

/* The amount of RX buffer space consumed by standard skb overhead */
#define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))

/* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
#define IPA_ENDPOINT_QMAP_METADATA_MASK		0x000000ff /* host byte order */

#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3
#define IPA_AGGR_TIME_LIMIT_DEFAULT		500	/* microseconds */

/** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode {
	IPA_STATUS_OPCODE_PACKET		= 0x01,
	IPA_STATUS_OPCODE_DROPPED_PACKET	= 0x04,
	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 0x08,
	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 0x40,
};

/** enum ipa_status_exception - status element exception type */
enum ipa_status_exception {
	/* 0 means no exception */
	IPA_STATUS_EXCEPTION_DEAGGR		= 0x01,
};

/* Status element provided by hardware */
struct ipa_status {
	u8 opcode;		/* enum ipa_status_opcode */
	u8 exception;		/* enum ipa_status_exception */
	__le16 mask;
	__le16 pkt_len;
	u8 endp_src_idx;
	u8 endp_dst_idx;
	__le32 metadata;
	__le32 flags1;
	__le64 flags2;
	__le32 flags3;
	__le32 flags4;
};

/* Field masks for struct ipa_status structure fields */
#define IPA_STATUS_DST_IDX_FMASK		GENMASK(4, 0)
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK	GENMASK(31, 22)

#ifdef IPA_VALIDATE

static void ipa_endpoint_validate_build(void)
{
	/* The aggregation byte limit defines the point at which an
	 * aggregation window will close.  It is programmed into the
	 * IPA hardware as a number of KB.  We don't use "hard byte
	 * limit" aggregation, which means that we need to supply
	 * enough space in a receive buffer to hold a complete MTU
	 * plus normal skb overhead *after* that aggregation byte
	 * limit has been crossed.
	 *
	 * This check just ensures we don't define a receive buffer
	 * size that would exceed what we can represent in the field
	 * that is used to program its size.
	 */
	BUILD_BUG_ON(IPA_RX_BUFFER_SIZE >
		     field_max(AGGR_BYTE_LIMIT_FMASK) * SZ_1K +
		     IPA_MTU + IPA_RX_BUFFER_OVERHEAD);
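
	/* As a rough worked example (assuming a 6-bit byte limit field
	 * and a 1500 byte MTU): the largest programmable limit would be
	 * 63 KB, so any buffer no larger than 64512 + 1500 bytes plus
	 * IPA_RX_BUFFER_OVERHEAD would pass this check; the 8192 byte
	 * buffer used here is well within that bound.
	 */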

	/* I honestly don't know where this requirement comes from.  But
	 * it holds, and if we someday need to loosen the constraint we
	 * can try to track it down.
	 */
	BUILD_BUG_ON(sizeof(struct ipa_status) % 4);
}
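
/* Validate configuration data for a single endpoint.  An RX endpoint must
 * not claim filtering support.  A TX endpoint that names a status or DMA
 * endpoint must name one that is in range and defined, and a status
 * endpoint must additionally be an RX endpoint.
 */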
static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
			    const struct ipa_gsi_endpoint_data *all_data,
			    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *other_data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name other_name;

	if (ipa_gsi_endpoint_data_empty(data))
		return true;

	if (!data->toward_ipa) {
		if (data->endpoint.filter_support) {
			dev_err(dev, "filtering not supported for RX endpoint %u\n",
				data->endpoint_id);
			return false;
		}

		return true;	/* Nothing more to check for RX */
	}

	if (data->endpoint.config.status_enable) {
		other_name = data->endpoint.config.tx.status_endpoint;
		if (other_name >= count) {
			dev_err(dev, "status endpoint name %u out of range for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* Status endpoint must be defined... */
		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "status endpoint name %u undefined for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* ...and has to be an RX endpoint... */
		if (other_data->toward_ipa) {
			dev_err(dev,
				"status endpoint for endpoint %u not RX\n",
				data->endpoint_id);
			return false;
		}

		/* ...and if it's to be an AP endpoint... */
		if (other_data->ee_id == GSI_EE_AP) {
			/* ...make sure it has status enabled. */
			if (!other_data->endpoint.config.status_enable) {
				dev_err(dev,
					"status not enabled for endpoint %u\n",
					other_data->endpoint_id);
				return false;
			}
		}
	}

	if (data->endpoint.config.dma_mode) {
		other_name = data->endpoint.config.dma_endpoint;
		if (other_name >= count) {
			dev_err(dev, "DMA endpoint name %u out of range for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "DMA endpoint name %u undefined for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}
	}

	return true;
}

static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
				    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *dp = data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name name;

	ipa_endpoint_validate_build();

	if (count > IPA_ENDPOINT_COUNT) {
		dev_err(dev, "too many endpoints specified (%u > %u)\n",
			count, IPA_ENDPOINT_COUNT);
		return false;
	}

	/* Make sure needed endpoints have defined data */
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
		dev_err(dev, "command TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
		dev_err(dev, "LAN RX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
		dev_err(dev, "AP->modem TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
		dev_err(dev, "AP<-modem RX endpoint not defined\n");
		return false;
	}

	for (name = 0; name < count; name++, dp++)
		if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
			return false;

	return true;
}

#else /* !IPA_VALIDATE */

static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
				    const struct ipa_gsi_endpoint_data *data)
{
	return true;
}

#endif /* !IPA_VALIDATE */

/* Allocate a transaction to use on a non-command endpoint */
static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
						  u32 tre_count)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;
	enum dma_data_direction direction;

	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
}

/* suspend_delay represents suspend for RX, delay for TX endpoints.
 * Note that suspend is not supported starting with IPA v4.0.
 */
static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
	u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	bool state;
	u32 mask;
	u32 val;

	/* Suspend is not supported for IPA v4.0+.  Delay doesn't work
	 * correctly on IPA v4.2.
	 *
	 * if (endpoint->toward_ipa)
	 *	assert(ipa->version != IPA_VERSION_4_2);
	 * else
	 *	assert(ipa->version == IPA_VERSION_3_5_1);
	 */
	mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;

	val = ioread32(ipa->reg_virt + offset);
	/* Don't bother if it's already in the requested state */
	state = !!(val & mask);
	if (suspend_delay != state) {
		val ^= mask;
		iowrite32(val, ipa->reg_virt + offset);
	}

	return state;
}

/* We currently don't care what the previous state was for delay mode */
static void
ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
{
	/* assert(endpoint->toward_ipa); */

	/* Delay mode doesn't work properly for IPA v4.2 */
	if (endpoint->ipa->version != IPA_VERSION_4_2)
		(void)ipa_endpoint_init_ctrl(endpoint, enable);
}

static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	/* assert(mask & ipa->available); */
	offset = ipa_reg_state_aggr_active_offset(ipa->version);
	val = ioread32(ipa->reg_virt + offset);

	return !!(val & mask);
}
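
/* Force-close whatever aggregation frame is currently open on an endpoint
 * by writing the endpoint's bit to the AGGR_FORCE_CLOSE register.
 */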
static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;

	/* assert(mask & ipa->available); */
	iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
}

/**
 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
 * @endpoint:	Endpoint on which to emulate a suspend
 *
 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
 * with an open aggregation frame.  This is to work around a hardware
 * issue in IPA version 3.5.1 where the suspend interrupt will not be
 * generated when it should be.
 */
static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;

	if (!endpoint->data->aggregation)
		return;

	/* Nothing to do if the endpoint doesn't have aggregation open */
	if (!ipa_endpoint_aggr_active(endpoint))
		return;

	/* Force close aggregation */
	ipa_endpoint_force_close(endpoint);

	ipa_interrupt_simulate_suspend(ipa->interrupt);
}

/* Returns previous suspend state (true means suspend was enabled) */
static bool
ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
{
	bool suspended;

	if (endpoint->ipa->version != IPA_VERSION_3_5_1)
		return enable;	/* For IPA v4.0+, no change made */

	/* assert(!endpoint->toward_ipa); */

	suspended = ipa_endpoint_init_ctrl(endpoint, enable);

	/* A client suspended with an open aggregation frame will not
	 * generate a SUSPEND IPA interrupt.  If enabling suspend, have
	 * ipa_endpoint_suspend_aggr() handle this.
	 */
	if (enable && !suspended)
		ipa_endpoint_suspend_aggr(endpoint);

	return suspended;
}

/* Enable or disable delay or suspend mode on all modem endpoints */
void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
{
	u32 endpoint_id;

	/* DELAY mode doesn't work correctly on IPA v4.2 */
	if (ipa->version == IPA_VERSION_4_2)
		return;

	for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];

		if (endpoint->ee_id != GSI_EE_MODEM)
			continue;

		/* Set TX delay mode or RX suspend mode */
		if (endpoint->toward_ipa)
			ipa_endpoint_program_delay(endpoint, enable);
		else
			(void)ipa_endpoint_program_suspend(endpoint, enable);
	}
}

/* Reset all modem endpoints to use the default exception endpoint */
int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;
	struct gsi_trans *trans;
	u32 count;

	/* We need one command per modem TX endpoint.  We can get an upper
	 * bound on that by assuming all initialized endpoints are modem->IPA.
	 * That won't happen, and we could be more precise, but this is fine
	 * for now.  We need to end the transaction with a "tag process."
	 */
	count = hweight32(initialized) + ipa_cmd_tag_process_count();
	trans = ipa_cmd_trans_alloc(ipa, count);
	if (!trans) {
		dev_err(&ipa->pdev->dev,
			"no transaction to reset modem exception endpoints\n");
		return -EBUSY;
	}

	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;
		u32 offset;

		initialized ^= BIT(endpoint_id);

		/* We only reset modem TX endpoints */
		endpoint = &ipa->endpoint[endpoint_id];
		if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
			continue;

		offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

		/* Value written is 0, and all bits are updated.  That
		 * means status is disabled on the endpoint, and as a
		 * result all other fields in the register are ignored.
		 */
		ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
	}

	ipa_cmd_tag_process_add(trans);

	/* XXX This should have a 1 second timeout */
	gsi_trans_commit_wait(trans);

	return 0;
}

static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	/* FRAG_OFFLOAD_EN is 0 */
	if (endpoint->data->checksum) {
		if (endpoint->toward_ipa) {
			u32 checksum_offset;

			val |= u32_encode_bits(IPA_CS_OFFLOAD_UL,
					       CS_OFFLOAD_EN_FMASK);
			/* Checksum header offset is in 4-byte units */
			checksum_offset = sizeof(struct rmnet_map_header);
			checksum_offset /= sizeof(u32);
			val |= u32_encode_bits(checksum_offset,
					       CS_METADATA_HDR_OFFSET_FMASK);
		} else {
			val |= u32_encode_bits(IPA_CS_OFFLOAD_DL,
					       CS_OFFLOAD_EN_FMASK);
		}
	} else {
		val |= u32_encode_bits(IPA_CS_OFFLOAD_NONE,
				       CS_OFFLOAD_EN_FMASK);
	}
	/* CS_GEN_QMB_MASTER_SEL is 0 */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/**
 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
 * @endpoint:	Endpoint pointer
 *
 * We program QMAP endpoints so each packet received is preceded by a QMAP
 * header structure.  The QMAP header contains a 1-byte mux_id and 2-byte
 * packet size field, and we have the IPA hardware populate both for each
 * received packet.  The header is configured (in the HDR_EXT register)
 * to use big endian format.
 *
 * The packet size is written into the QMAP header's pkt_len field.  That
 * location is defined here using the HDR_OFST_PKT_SIZE field.
 *
 * The mux_id comes from a 4-byte metadata value supplied with each packet
 * by the modem.  It is *not* a QMAP header, but it does contain the mux_id
 * value that we want, in its low-order byte.  A bitmask defined in the
 * endpoint's METADATA_MASK register defines which byte within the modem
 * metadata contains the mux_id.  And the OFST_METADATA field programmed
 * here indicates where the extracted byte should be placed within the QMAP
 * header.
 */
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;

	if (endpoint->data->qmap) {
		size_t header_size = sizeof(struct rmnet_map_header);
		enum ipa_version version = ipa->version;

		/* We might supply a checksum header after the QMAP header */
		if (endpoint->toward_ipa && endpoint->data->checksum)
			header_size += sizeof(struct rmnet_map_ul_csum_header);
		val |= ipa_header_size_encoded(version, header_size);

		/* Define how to fill fields in a received QMAP header */
		if (!endpoint->toward_ipa) {
			u32 offset;	/* Field offset within header */

			/* Where IPA will write the metadata value */
			offset = offsetof(struct rmnet_map_header, mux_id);
			val |= ipa_metadata_offset_encoded(version, offset);

			/* Where IPA will write the length */
			offset = offsetof(struct rmnet_map_header, pkt_len);
			/* Upper bits are stored in HDR_EXT with IPA v4.5 */
			if (version == IPA_VERSION_4_5)
				offset &= field_mask(HDR_OFST_PKT_SIZE_FMASK);

			val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
			val |= u32_encode_bits(offset, HDR_OFST_PKT_SIZE_FMASK);
		}
		/* For QMAP TX, metadata offset is 0 (modem assumes this) */
		val |= HDR_OFST_METADATA_VALID_FMASK;

		/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
		/* HDR_A5_MUX is 0 */
		/* HDR_LEN_INC_DEAGG_HDR is 0 */
		/* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
	}

	iowrite32(val, ipa->reg_virt + offset);
}
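
/* The HDR_EXT register supplements the HDR register: it selects big endian
 * header fields, controls how QMAP padding and payload length are handled
 * on RX, and for IPA v4.5 holds most-significant bits of a couple of
 * fields defined in the HDR register.
 */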
static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
	u32 pad_align = endpoint->data->rx.pad_align;
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;

	val |= HDR_ENDIANNESS_FMASK;		/* big endian */

	/* A QMAP header contains a 6 bit pad field at offset 0.  The RMNet
	 * driver assumes this field is meaningful in packets it receives,
	 * and assumes the header's payload length includes that padding.
	 * The RMNet driver does *not* pad packets it sends, however, so
	 * the pad field (although 0) should be ignored.
	 */
	if (endpoint->data->qmap && !endpoint->toward_ipa) {
		val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
		/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
		val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK;
		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
	}

	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
	if (!endpoint->toward_ipa)
		val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);

	/* IPA v4.5 adds some most-significant bits to a few fields,
	 * two of which are defined in the HDR (not HDR_EXT) register.
	 */
	if (ipa->version == IPA_VERSION_4_5) {
		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
		if (endpoint->data->qmap && !endpoint->toward_ipa) {
			u32 offset;

			offset = offsetof(struct rmnet_map_header, pkt_len);
			offset >>= hweight32(HDR_OFST_PKT_SIZE_FMASK);
			val |= u32_encode_bits(offset,
					       HDR_OFST_PKT_SIZE_MSB_FMASK);
			/* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
		}
	}

	iowrite32(val, ipa->reg_virt + offset);
}

static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 val = 0;
	u32 offset;

	if (endpoint->toward_ipa)
		return;		/* Register not valid for TX endpoints */

	offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);

	/* Note that HDR_ENDIANNESS indicates big endian header fields */
	if (endpoint->data->qmap)
		val = cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
	u32 val;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	if (endpoint->data->dma_mode) {
		enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
		u32 dma_endpoint_id;

		dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;

		val = u32_encode_bits(IPA_DMA, MODE_FMASK);
		val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
	} else {
		val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
	}
	/* All other bits unspecified (and 0) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* Compute the aggregation size value to use for a given buffer size */
static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
{
	/* We don't use "hard byte limit" aggregation, so we define the
	 * aggregation limit such that our buffer has enough space *after*
	 * that limit to receive a full MTU of data, plus overhead.
	 */
	rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;

	return rx_buffer_size / SZ_1K;
}
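
/* For example, assuming a 1500 byte MTU and roughly 700 bytes of skb
 * overhead, an 8192 byte receive buffer would be programmed with an
 * aggregation byte limit of (8192 - 1500 - 700) / 1024 = 5 KB, leaving
 * room for a full MTU of data to arrive after that limit is crossed.
 */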

static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (endpoint->data->aggregation) {
		if (!endpoint->toward_ipa) {
			u32 limit;

			val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);

			limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
			val |= u32_encode_bits(limit, AGGR_BYTE_LIMIT_FMASK);

			limit = IPA_AGGR_TIME_LIMIT_DEFAULT;
			limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);
			val |= u32_encode_bits(limit, AGGR_TIME_LIMIT_FMASK);

			/* AGGR_PKT_LIMIT is 0 (unlimited) */

			if (endpoint->data->rx.aggr_close_eof)
				val |= AGGR_SW_EOF_ACTIVE_FMASK;
			/* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
		} else {
			val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
					       AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
			/* other fields ignored */
		}
		/* AGGR_FORCE_CLOSE is 0 */
		/* AGGR_GRAN_SEL is 0 for IPA v4.5 */
	} else {
		val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
		/* other fields ignored */
	}

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* The head-of-line blocking timer is defined as a tick count, where each
 * tick represents 128 cycles of the IPA core clock.  Return the value
 * that should be written to that register that represents the timeout
 * period provided.
 */
static u32 ipa_reg_init_hol_block_timer_val(struct ipa *ipa, u32 microseconds)
{
	u32 width;
	u32 scale;
	u64 ticks;
	u64 rate;
	u32 high;
	u32 val;

	if (!microseconds)
		return 0;	/* Nothing to compute if timer period is 0 */

	/* Use 64 bit arithmetic to avoid overflow... */
	rate = ipa_clock_rate(ipa);
	ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
	/* ...but we still need to fit into a 32-bit register */
	WARN_ON(ticks > U32_MAX);

	/* IPA v3.5.1 through v4.1 just record the tick count */
	if (ipa->version < IPA_VERSION_4_2)
		return (u32)ticks;

	/* For IPA v4.2, the tick count is represented by base and
	 * scale fields within the 32-bit timer register, where:
	 *     ticks = base << scale;
	 * The best precision is achieved when the base value is as
	 * large as possible.  Find the highest set bit in the tick
	 * count, and extract the number of bits in the base field
	 * such that that high bit is included.
	 */
	high = fls(ticks);		/* 1..32 */
	width = HWEIGHT32(BASE_VALUE_FMASK);
	scale = high > width ? high - width : 0;
	if (scale) {
		/* If we're scaling, round up to get a closer result */
		ticks += 1 << (scale - 1);
		/* High bit was set, so rounding might have affected it */
		if (fls(ticks) != high)
			scale++;
	}

	val = u32_encode_bits(scale, SCALE_FMASK);
	val |= u32_encode_bits(ticks >> scale, BASE_VALUE_FMASK);

	return val;
}
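
/* Worked example, assuming a 5-bit base field: 4800 ticks has its highest
 * set bit at position 13, so scale = 13 - 5 = 8.  Rounding adds
 * 1 << 7 = 128 (giving 4928) without changing the high bit, and the
 * encoded result is base = 4928 >> 8 = 19 with scale = 8, representing
 * 19 << 8 = 4864 ticks, which is closer to 4800 than plain truncation
 * (18 << 8 = 4608) would be.
 */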

/* If microseconds is 0, timeout is immediate */
static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
					      u32 microseconds)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
	val = ipa_reg_init_hol_block_timer_val(ipa, microseconds);
	iowrite32(val, ipa->reg_virt + offset);
}

static void
ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 offset;
	u32 val;

	val = enable ? HOL_BLOCK_EN_FMASK : 0;
	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
{
	u32 i;

	for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[i];

		if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
			continue;

		ipa_endpoint_init_hol_block_timer(endpoint, 0);
		ipa_endpoint_init_hol_block_enable(endpoint, true);
	}
}

static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	/* DEAGGR_HDR_LEN is 0 */
	/* PACKET_OFFSET_VALID is 0 */
	/* PACKET_OFFSET_LOCATION is ignored (not valid) */
	/* MAX_PACKET_LEN is 0 (not enforced) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 val;

	val = rsrc_grp_encoded(ipa->version, endpoint->data->resource_group);
	iowrite32(val, ipa->reg_virt + offset);
}

static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
	u32 seq_type = endpoint->seq_type;
	u32 val = 0;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	/* Sequencer type is made up of four nibbles */
	val |= u32_encode_bits(seq_type & 0xf, HPS_SEQ_TYPE_FMASK);
	val |= u32_encode_bits((seq_type >> 4) & 0xf, DPS_SEQ_TYPE_FMASK);
	/* The second two apply to replicated packets */
	val |= u32_encode_bits((seq_type >> 8) & 0xf, HPS_REP_SEQ_TYPE_FMASK);
	val |= u32_encode_bits((seq_type >> 12) & 0xf, DPS_REP_SEQ_TYPE_FMASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/**
 * ipa_endpoint_skb_tx() - Transmit a socket buffer
 * @endpoint:	Endpoint pointer
 * @skb:	Socket buffer to send
 *
 * Returns:	0 if successful, or a negative error code
 */
int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
{
	struct gsi_trans *trans;
	u32 nr_frags;
	int ret;

	/* Make sure source endpoint's TLV FIFO has enough entries to
	 * hold the linear portion of the skb and all its fragments.
	 * If not, see if we can linearize it before giving up.
	 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (1 + nr_frags > endpoint->trans_tre_max) {
		if (skb_linearize(skb))
			return -E2BIG;
		nr_frags = 0;
	}

	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
	if (!trans)
		return -EBUSY;

	ret = gsi_trans_skb_add(trans, skb);
	if (ret)
		goto err_trans_free;
	trans->data = skb;	/* transaction owns skb now */

	gsi_trans_commit(trans, !netdev_xmit_more());

	return 0;

err_trans_free:
	gsi_trans_free(trans);

	return -ENOMEM;
}

static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;
	u32 offset;

	offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

	if (endpoint->data->status_enable) {
		val |= STATUS_EN_FMASK;
		if (endpoint->toward_ipa) {
			enum ipa_endpoint_name name;
			u32 status_endpoint_id;

			name = endpoint->data->tx.status_endpoint;
			status_endpoint_id = ipa->name_map[name]->endpoint_id;

			val |= u32_encode_bits(status_endpoint_id,
					       STATUS_ENDP_FMASK);
		}
		/* STATUS_LOCATION is 0, meaning status element precedes
		 * packet (not present for IPA v4.5)
		 */
		/* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v3.5.1) */
	}

	iowrite32(val, ipa->reg_virt + offset);
}
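
/* Supply the hardware with one new receive buffer: allocate enough pages
 * to hold IPA_RX_BUFFER_SIZE bytes, add the buffer to a single-TRE
 * transaction (offset by NET_SKB_PAD to leave room for skb headroom), and
 * commit it, ringing the doorbell once every IPA_REPLENISH_BATCH buffers.
 */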
static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
{
	struct gsi_trans *trans;
	bool doorbell = false;
	struct page *page;
	u32 offset;
	u32 len;
	int ret;

	page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE));
	if (!page)
		return -ENOMEM;

	trans = ipa_endpoint_trans_alloc(endpoint, 1);
	if (!trans)
		goto err_free_pages;

	/* Offset the buffer to make space for skb headroom */
	offset = NET_SKB_PAD;
	len = IPA_RX_BUFFER_SIZE - offset;

	ret = gsi_trans_page_add(trans, page, len, offset);
	if (ret)
		goto err_trans_free;
	trans->data = page;	/* transaction owns page now */

	if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) {
		doorbell = true;
		endpoint->replenish_ready = 0;
	}

	gsi_trans_commit(trans, doorbell);

	return 0;

err_trans_free:
	gsi_trans_free(trans);
err_free_pages:
	__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));

	return -ENOMEM;
}

/**
 * ipa_endpoint_replenish() - Replenish the Rx packets cache.
 * @endpoint:	Endpoint to be replenished
 * @count:	Number of buffers to send to hardware
 *
 * Allocate RX packet wrapper structures with maximal socket buffers
 * for an endpoint.  These are supplied to the hardware, which fills
 * them with incoming data.
 */
static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count)
{
	struct gsi *gsi;
	u32 backlog;

	if (!endpoint->replenish_enabled) {
		if (count)
			atomic_add(count, &endpoint->replenish_saved);
		return;
	}

	while (atomic_dec_not_zero(&endpoint->replenish_backlog))
		if (ipa_endpoint_replenish_one(endpoint))
			goto try_again_later;
	if (count)
		atomic_add(count, &endpoint->replenish_backlog);

	return;

try_again_later:
	/* The last one didn't succeed, so fix the backlog */
	backlog = atomic_inc_return(&endpoint->replenish_backlog);

	if (count)
		atomic_add(count, &endpoint->replenish_backlog);

	/* Whenever a receive buffer transaction completes we'll try to
	 * replenish again.  It's unlikely, but if we fail to supply even
	 * one buffer, nothing will trigger another replenish attempt.
	 * Receive buffer transactions use one TRE, so schedule work to
	 * try replenishing again if our backlog is *all* available TREs.
	 */
	gsi = &endpoint->ipa->gsi;
	if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
		schedule_delayed_work(&endpoint->replenish_work,
				      msecs_to_jiffies(1));
}
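
/* When replenishing is (re)enabled, move any buffer count that accumulated
 * in replenish_saved while it was disabled back into replenish_backlog,
 * then kick off replenishing if the hardware currently holds no receive
 * buffers at all.
 */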
static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 max_backlog;
	u32 saved;

	endpoint->replenish_enabled = true;
	while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
		atomic_add(saved, &endpoint->replenish_backlog);

	/* Start replenishing if hardware currently has no buffers */
	max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
	if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
		ipa_endpoint_replenish(endpoint, 0);
}

static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
{
	u32 backlog;

	endpoint->replenish_enabled = false;
	while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
		atomic_add(backlog, &endpoint->replenish_saved);
}

static void ipa_endpoint_replenish_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct ipa_endpoint *endpoint;

	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);

	ipa_endpoint_replenish(endpoint, 0);
}

static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
				  void *data, u32 len, u32 extra)
{
	struct sk_buff *skb;

	skb = __dev_alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		skb_put(skb, len);
		memcpy(skb->data, data, len);
		skb->truesize += extra;
	}

	/* Now receive it, or drop it if there's no netdev */
	if (endpoint->netdev)
		ipa_modem_skb_rx(endpoint->netdev, skb);
	else if (skb)
		dev_kfree_skb_any(skb);
}

static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
				   struct page *page, u32 len)
{
	struct sk_buff *skb;

	/* Nothing to do if there's no netdev */
	if (!endpoint->netdev)
		return false;

	/* assert(len <= SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE-NET_SKB_PAD)); */
	skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
	if (skb) {
		/* Reserve the headroom and account for the data */
		skb_reserve(skb, NET_SKB_PAD);
		skb_put(skb, len);
	}

	/* Receive the buffer (or record drop if unable to build it) */
	ipa_modem_skb_rx(endpoint->netdev, skb);

	return skb != NULL;
}

/* The format of a packet status element is the same for several status
 * types (opcodes).  Other types aren't currently supported.
 */
static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
{
	switch (opcode) {
	case IPA_STATUS_OPCODE_PACKET:
	case IPA_STATUS_OPCODE_DROPPED_PACKET:
	case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
	case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
		return true;
	default:
		return false;
	}
}
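
/* Return true if a status element should be skipped: its opcode doesn't
 * describe a packet, it reports no packet data, or it is addressed to a
 * different endpoint than the one that received it.
 */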
static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
				     const struct ipa_status *status)
{
	u32 endpoint_id;

	if (!ipa_status_format_packet(status->opcode))
		return true;
	if (!status->pkt_len)
		return true;
	endpoint_id = u32_get_bits(status->endp_dst_idx,
				   IPA_STATUS_DST_IDX_FMASK);
	if (endpoint_id != endpoint->endpoint_id)
		return true;

	return false;	/* Don't skip this packet, process it */
}

/* Return whether the status indicates the packet should be dropped */
static bool ipa_status_drop_packet(const struct ipa_status *status)
{
	u32 val;

	/* Deaggregation exceptions we drop; all other types we consume */
	if (status->exception)
		return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;

	/* Drop the packet if it fails to match a routing rule; otherwise no */
	val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);

	return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
}
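
/* Walk the sequence of status elements in a received buffer.  Each element
 * describes one packet that immediately follows it; packets that aren't
 * dropped are copied into new socket buffers and passed up to the netdev.
 */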
static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
				      struct page *page, u32 total_len)
{
	void *data = page_address(page) + NET_SKB_PAD;
	u32 unused = IPA_RX_BUFFER_SIZE - total_len;
	u32 resid = total_len;

	while (resid) {
		const struct ipa_status *status = data;
		u32 align;
		u32 len;

		if (resid < sizeof(*status)) {
			dev_err(&endpoint->ipa->pdev->dev,
				"short message (%u bytes < %zu byte status)\n",
				resid, sizeof(*status));
			break;
		}

		/* Skip over status packets that lack packet data */
		if (ipa_endpoint_status_skip(endpoint, status)) {
			data += sizeof(*status);
			resid -= sizeof(*status);
			continue;
		}

		/* Compute the amount of buffer space consumed by the
		 * packet, including the status element.  If the hardware
		 * is configured to pad packet data to an aligned boundary,
		 * account for that.  And if checksum offload is enabled
		 * a trailer containing computed checksum information will
		 * be appended.
		 */
		align = endpoint->data->rx.pad_align ? : 1;
		len = le16_to_cpu(status->pkt_len);
		len = sizeof(*status) + ALIGN(len, align);
		if (endpoint->data->checksum)
			len += sizeof(struct rmnet_map_dl_csum_trailer);

		/* Charge the new packet with a proportional fraction of
		 * the unused space in the original receive buffer.
		 * XXX Charge a proportion of the *whole* receive buffer?
		 */
		if (!ipa_status_drop_packet(status)) {
			u32 extra = unused * len / total_len;
			void *data2 = data + sizeof(*status);
			u32 len2 = le16_to_cpu(status->pkt_len);

			/* Client receives only packet data (no status) */
			ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
		}

		/* Consume status and the full packet it describes */
		data += len;
		resid -= len;
	}
}

/* Complete a TX transaction, command or from ipa_endpoint_skb_tx() */
static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint,
				     struct gsi_trans *trans)
{
}

/* Complete transaction initiated in ipa_endpoint_replenish_one() */
static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
				     struct gsi_trans *trans)
{
	struct page *page;

	ipa_endpoint_replenish(endpoint, 1);

	if (trans->cancelled)
		return;

	/* Parse or build a socket buffer using the actual received length */
	page = trans->data;
	if (endpoint->data->status_enable)
		ipa_endpoint_status_parse(endpoint, page, trans->len);
	else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
		trans->data = NULL;	/* Pages have been consumed */
}

void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
				 struct gsi_trans *trans)
{
	if (endpoint->toward_ipa)
		ipa_endpoint_tx_complete(endpoint, trans);
	else
		ipa_endpoint_rx_complete(endpoint, trans);
}

void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
				struct gsi_trans *trans)
{
	if (endpoint->toward_ipa) {
		struct ipa *ipa = endpoint->ipa;

		/* Nothing to do for command transactions */
		if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
			struct sk_buff *skb = trans->data;

			if (skb)
				dev_kfree_skb_any(skb);
		}
	} else {
		struct page *page = trans->data;

		if (page)
			__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
	}
}
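
/* Program the ROUTE register so the given endpoint is the default
 * destination ("default pipe") for packets, with headers retained; the
 * same endpoint is also used as the default for fragmented packets.
 */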
void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
{
	u32 val;

	/* ROUTE_DIS is 0 */
	val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
	val |= ROUTE_DEF_HDR_TABLE_FMASK;
	val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
	val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
	val |= ROUTE_DEF_RETAIN_HDR_FMASK;

	iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
}

void ipa_endpoint_default_route_clear(struct ipa *ipa)
{
	ipa_endpoint_default_route_set(ipa, 0);
}

/**
 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
 * @endpoint:	Endpoint to be reset
 *
 * If aggregation is active on an RX endpoint when a reset is performed
 * on its underlying GSI channel, a special sequence of actions must be
 * taken to ensure the IPA pipeline is properly cleared.
 *
 * Return:	0 if successful, or a negative error code
 */
static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	bool suspended = false;
	dma_addr_t addr;
	u32 retries;
	u32 len = 1;
	void *virt;
	int ret;

	virt = kzalloc(len, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr)) {
		ret = -ENOMEM;
		goto out_kfree;
	}

	/* Force close aggregation before issuing the reset */
	ipa_endpoint_force_close(endpoint);

	/* Reset and reconfigure the channel with the doorbell engine
	 * disabled.  Then poll until we know aggregation is no longer
	 * active.  We'll re-enable the doorbell (if appropriate) when
	 * we reset again below.
	 */
	gsi_channel_reset(gsi, endpoint->channel_id, false);

	/* Make sure the channel isn't suspended */
	suspended = ipa_endpoint_program_suspend(endpoint, false);

	/* Start channel and do a 1 byte read */
	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
	if (ret)
		goto err_endpoint_stop;

	/* Wait for aggregation to be closed on the channel */
	retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
	do {
		if (!ipa_endpoint_aggr_active(endpoint))
			break;
		msleep(1);
	} while (retries--);

	/* Check one last time */
	if (ipa_endpoint_aggr_active(endpoint))
		dev_err(dev, "endpoint %u still active during reset\n",
			endpoint->endpoint_id);

	gsi_trans_read_byte_done(gsi, endpoint->channel_id);

	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	/* Finally, reset and reconfigure the channel again (re-enabling
	 * the doorbell engine if appropriate).  Sleep for 1 millisecond to
	 * complete the channel reset sequence.  Finish by suspending the
	 * channel again (if necessary).
	 */
	gsi_channel_reset(gsi, endpoint->channel_id, true);

	msleep(1);

	goto out_suspend_again;

err_endpoint_stop:
	(void)gsi_channel_stop(gsi, endpoint->channel_id);
out_suspend_again:
	if (suspended)
		(void)ipa_endpoint_program_suspend(endpoint, true);
	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
out_kfree:
	kfree(virt);

	return ret;
}

static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
{
	u32 channel_id = endpoint->channel_id;
	struct ipa *ipa = endpoint->ipa;
	bool special;
	int ret = 0;

	/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
	 * is active, we need to handle things specially to recover.
	 * All other cases just need to reset the underlying GSI channel.
	 */
	special = ipa->version == IPA_VERSION_3_5_1 &&
			!endpoint->toward_ipa &&
			endpoint->data->aggregation;
	if (special && ipa_endpoint_aggr_active(endpoint))
		ret = ipa_endpoint_reset_rx_aggr(endpoint);
	else
		gsi_channel_reset(&ipa->gsi, channel_id, true);

	if (ret)
		dev_err(&ipa->pdev->dev,
			"error %d resetting channel %u for endpoint %u\n",
			ret, endpoint->channel_id, endpoint->endpoint_id);
}
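
/* Write all of an endpoint's configuration registers.  Delay mode (TX) or
 * suspend (RX) is cleared first, then each of the per-endpoint init
 * registers and finally the status register are programmed.
 */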
static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
{
	if (endpoint->toward_ipa)
		ipa_endpoint_program_delay(endpoint, false);
	else
		(void)ipa_endpoint_program_suspend(endpoint, false);
	ipa_endpoint_init_cfg(endpoint);
	ipa_endpoint_init_hdr(endpoint);
	ipa_endpoint_init_hdr_ext(endpoint);
	ipa_endpoint_init_hdr_metadata_mask(endpoint);
	ipa_endpoint_init_mode(endpoint);
	ipa_endpoint_init_aggr(endpoint);
	ipa_endpoint_init_deaggr(endpoint);
	ipa_endpoint_init_rsrc_grp(endpoint);
	ipa_endpoint_init_seq(endpoint);
	ipa_endpoint_status(endpoint);
}

int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	int ret;

	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret) {
		dev_err(&ipa->pdev->dev,
			"error %d starting %cX channel %u for endpoint %u\n",
			ret, endpoint->toward_ipa ? 'T' : 'R',
			endpoint->channel_id, endpoint->endpoint_id);
		return ret;
	}

	if (!endpoint->toward_ipa) {
		ipa_interrupt_suspend_enable(ipa->interrupt,
					     endpoint->endpoint_id);
		ipa_endpoint_replenish_enable(endpoint);
	}

	ipa->enabled |= BIT(endpoint->endpoint_id);

	return 0;
}

void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	int ret;

	if (!(ipa->enabled & mask))
		return;

	ipa->enabled ^= mask;

	if (!endpoint->toward_ipa) {
		ipa_endpoint_replenish_disable(endpoint);
		ipa_interrupt_suspend_disable(ipa->interrupt,
					      endpoint->endpoint_id);
	}

	/* Note that if stop fails, the channel's state is not well-defined */
	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
		dev_err(&ipa->pdev->dev,
			"error %d attempting to stop endpoint %u\n", ret,
			endpoint->endpoint_id);
}

void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	bool stop_channel;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;

	if (!endpoint->toward_ipa) {
		ipa_endpoint_replenish_disable(endpoint);
		(void)ipa_endpoint_program_suspend(endpoint, true);
	}

	/* IPA v3.5.1 doesn't use channel stop for suspend */
	stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
	ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel);
	if (ret)
		dev_err(dev, "error %d suspending channel %u\n", ret,
			endpoint->channel_id);
}

void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	bool start_channel;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;

	if (!endpoint->toward_ipa)
		(void)ipa_endpoint_program_suspend(endpoint, false);

	/* IPA v3.5.1 doesn't use channel start for resume */
	start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
	ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel);
	if (ret)
		dev_err(dev, "error %d resuming channel %u\n", ret,
			endpoint->channel_id);
	else if (!endpoint->toward_ipa)
		ipa_endpoint_replenish_enable(endpoint);
}

void ipa_endpoint_suspend(struct ipa *ipa)
{
	if (!ipa->setup_complete)
		return;

	if (ipa->modem_netdev)
		ipa_modem_suspend(ipa->modem_netdev);

	ipa_cmd_tag_process(ipa);

	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
}

void ipa_endpoint_resume(struct ipa *ipa)
{
	if (!ipa->setup_complete)
		return;

	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);

	if (ipa->modem_netdev)
		ipa_modem_resume(ipa->modem_netdev);
}

static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;

	/* Only AP endpoints get set up */
	if (endpoint->ee_id != GSI_EE_AP)
		return;

	endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id);
	if (!endpoint->toward_ipa) {
		/* RX transactions require a single TRE, so the maximum
		 * backlog is the same as the maximum outstanding TREs.
		 */
		endpoint->replenish_enabled = false;
		atomic_set(&endpoint->replenish_saved,
			   gsi_channel_tre_max(gsi, endpoint->channel_id));
		atomic_set(&endpoint->replenish_backlog, 0);
		INIT_DELAYED_WORK(&endpoint->replenish_work,
				  ipa_endpoint_replenish_work);
	}

	ipa_endpoint_program(endpoint);

	endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
}

static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
{
	endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);

	if (!endpoint->toward_ipa)
		cancel_delayed_work_sync(&endpoint->replenish_work);

	ipa_endpoint_reset(endpoint);
}

void ipa_endpoint_setup(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;

	while (initialized) {
		u32 endpoint_id = __ffs(initialized);

		initialized ^= BIT(endpoint_id);

		ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
	}
}

void ipa_endpoint_teardown(struct ipa *ipa)
{
	u32 set_up = ipa->set_up;

	while (set_up) {
		u32 endpoint_id = __fls(set_up);

		set_up ^= BIT(endpoint_id);

		ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
	}
}
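
/* ipa_endpoint_config() reads the FLAVOR_0 register to learn which endpoint
 * IDs the hardware actually implements (RX endpoints start at the lowest
 * "producer" ID), records the result in ipa->available, and verifies that
 * every endpoint we initialized both exists and points in the expected
 * direction.
 */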
int ipa_endpoint_config(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	u32 initialized;
	u32 rx_base;
	u32 rx_mask;
	u32 tx_mask;
	int ret = 0;
	u32 max;
	u32 val;

	/* Find out about the endpoints supplied by the hardware, and ensure
	 * the highest one doesn't exceed the number we support.
	 */
	val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);

	/* Our RX is an IPA producer */
	rx_base = u32_get_bits(val, IPA_PROD_LOWEST_FMASK);
	max = rx_base + u32_get_bits(val, IPA_MAX_PROD_PIPES_FMASK);
	if (max > IPA_ENDPOINT_MAX) {
		dev_err(dev, "too many endpoints (%u > %u)\n",
			max, IPA_ENDPOINT_MAX);
		return -EINVAL;
	}
	rx_mask = GENMASK(max - 1, rx_base);

	/* Our TX is an IPA consumer */
	max = u32_get_bits(val, IPA_MAX_CONS_PIPES_FMASK);
	tx_mask = GENMASK(max - 1, 0);

	ipa->available = rx_mask | tx_mask;

	/* Check for initialized endpoints not supported by the hardware */
	if (ipa->initialized & ~ipa->available) {
		dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
			ipa->initialized & ~ipa->available);
		ret = -EINVAL;		/* Report other errors too */
	}

	initialized = ipa->initialized;
	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;

		initialized ^= BIT(endpoint_id);

		/* Make sure it's pointing in the right direction */
		endpoint = &ipa->endpoint[endpoint_id];
		if ((endpoint_id < rx_base) != !!endpoint->toward_ipa) {
			dev_err(dev, "endpoint id %u wrong direction\n",
				endpoint_id);
			ret = -EINVAL;
		}
	}

	return ret;
}

void ipa_endpoint_deconfig(struct ipa *ipa)
{
	ipa->available = 0;	/* Nothing more to do */
}

static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
				  const struct ipa_gsi_endpoint_data *data)
{
	struct ipa_endpoint *endpoint;

	endpoint = &ipa->endpoint[data->endpoint_id];

	if (data->ee_id == GSI_EE_AP)
		ipa->channel_map[data->channel_id] = endpoint;
	ipa->name_map[name] = endpoint;

	endpoint->ipa = ipa;
	endpoint->ee_id = data->ee_id;
	endpoint->seq_type = data->endpoint.seq_type;
	endpoint->channel_id = data->channel_id;
	endpoint->endpoint_id = data->endpoint_id;
	endpoint->toward_ipa = data->toward_ipa;
	endpoint->data = &data->endpoint.config;

	ipa->initialized |= BIT(endpoint->endpoint_id);
}

void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
{
	endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);

	memset(endpoint, 0, sizeof(*endpoint));
}

void ipa_endpoint_exit(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;

	while (initialized) {
		u32 endpoint_id = __fls(initialized);

		initialized ^= BIT(endpoint_id);

		ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
	}
	memset(ipa->name_map, 0, sizeof(ipa->name_map));
	memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
}

/* Returns a bitmask of endpoints that support filtering, or 0 on error */
u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
		      const struct ipa_gsi_endpoint_data *data)
{
	enum ipa_endpoint_name name;
	u32 filter_map;

	if (!ipa_endpoint_data_valid(ipa, count, data))
		return 0;	/* Error */

	ipa->initialized = 0;

	filter_map = 0;
	for (name = 0; name < count; name++, data++) {
		if (ipa_gsi_endpoint_data_empty(data))
			continue;	/* Skip over empty slots */

		ipa_endpoint_init_one(ipa, name, data);

		if (data->endpoint.filter_support)
			filter_map |= BIT(data->endpoint_id);
	}

	if (!ipa_filter_map_valid(ipa, filter_map))
		goto err_endpoint_exit;

	return filter_map;	/* Non-zero bitmask */

err_endpoint_exit:
	ipa_endpoint_exit(ipa);

	return 0;	/* Error */
}