// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include "ice_common.h"
#include "ice_flex_pipe.h"

/* To support tunneling entries by PF, the package will append the PF number to
 * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
 */
static const struct ice_tunnel_type_scan tnls[] = {
	{ TNL_VXLAN, "TNL_VXLAN_PF" },
	{ TNL_GENEVE, "TNL_GENEVE_PF" },
	{ TNL_LAST, "" }
};

static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
	/* SWITCH */
	{
		ICE_SID_XLT0_SW,
		ICE_SID_XLT_KEY_BUILDER_SW,
		ICE_SID_XLT1_SW,
		ICE_SID_XLT2_SW,
		ICE_SID_PROFID_TCAM_SW,
		ICE_SID_PROFID_REDIR_SW,
		ICE_SID_FLD_VEC_SW,
		ICE_SID_CDID_KEY_BUILDER_SW,
		ICE_SID_CDID_REDIR_SW
	},

	/* ACL */
	{
		ICE_SID_XLT0_ACL,
		ICE_SID_XLT_KEY_BUILDER_ACL,
		ICE_SID_XLT1_ACL,
		ICE_SID_XLT2_ACL,
		ICE_SID_PROFID_TCAM_ACL,
		ICE_SID_PROFID_REDIR_ACL,
		ICE_SID_FLD_VEC_ACL,
		ICE_SID_CDID_KEY_BUILDER_ACL,
		ICE_SID_CDID_REDIR_ACL
	},

	/* FD */
	{
		ICE_SID_XLT0_FD,
		ICE_SID_XLT_KEY_BUILDER_FD,
		ICE_SID_XLT1_FD,
		ICE_SID_XLT2_FD,
		ICE_SID_PROFID_TCAM_FD,
		ICE_SID_PROFID_REDIR_FD,
		ICE_SID_FLD_VEC_FD,
		ICE_SID_CDID_KEY_BUILDER_FD,
		ICE_SID_CDID_REDIR_FD
	},

	/* RSS */
	{
		ICE_SID_XLT0_RSS,
		ICE_SID_XLT_KEY_BUILDER_RSS,
		ICE_SID_XLT1_RSS,
		ICE_SID_XLT2_RSS,
		ICE_SID_PROFID_TCAM_RSS,
		ICE_SID_PROFID_REDIR_RSS,
		ICE_SID_FLD_VEC_RSS,
		ICE_SID_CDID_KEY_BUILDER_RSS,
		ICE_SID_CDID_REDIR_RSS
	},

	/* PE */
	{
		ICE_SID_XLT0_PE,
		ICE_SID_XLT_KEY_BUILDER_PE,
		ICE_SID_XLT1_PE,
		ICE_SID_XLT2_PE,
		ICE_SID_PROFID_TCAM_PE,
		ICE_SID_PROFID_REDIR_PE,
		ICE_SID_FLD_VEC_PE,
		ICE_SID_CDID_KEY_BUILDER_PE,
		ICE_SID_CDID_REDIR_PE
	}
};

/**
 * ice_sect_id - returns section ID
 * @blk: block type
 * @sect: section type
 *
 * This helper function returns the proper section ID given a block type and a
 * section type.
 */
static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
{
	return ice_sect_lkup[blk][sect];
}

/**
 * ice_pkg_val_buf
 * @buf: pointer to the ice buffer
 *
 * This helper function validates a buffer's header.
 */
static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
{
	struct ice_buf_hdr *hdr;
	u16 section_count;
	u16 data_end;

	hdr = (struct ice_buf_hdr *)buf->buf;
	/* verify data */
	section_count = le16_to_cpu(hdr->section_count);
	if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
		return NULL;

	data_end = le16_to_cpu(hdr->data_end);
	if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
		return NULL;

	return hdr;
}

/**
 * ice_find_buf_table
 * @ice_seg: pointer to the ice segment
 *
 * Returns the address of the buffer table within the ice segment.
 */
static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
{
	struct ice_nvm_table *nvms;

	nvms = (struct ice_nvm_table *)
		(ice_seg->device_table +
		 le32_to_cpu(ice_seg->device_table_count));

	return (__force struct ice_buf_table *)
		(nvms->vers + le32_to_cpu(nvms->table_count));
}

/**
 * ice_pkg_enum_buf
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 *
 * This function will enumerate all the buffers in the ice segment. The first
 * call is made with the ice_seg parameter non-NULL; on subsequent calls,
 * ice_seg is set to NULL which continues the enumeration. When the function
 * returns a NULL pointer, then the end of the buffers has been reached, or an
 * unexpected value has been detected (for example an invalid section count or
 * an invalid buffer end value).
 */
static struct ice_buf_hdr *
ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
{
	if (ice_seg) {
		state->buf_table = ice_find_buf_table(ice_seg);
		if (!state->buf_table)
			return NULL;

		state->buf_idx = 0;
		return ice_pkg_val_buf(state->buf_table->buf_array);
	}

	if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count))
		return ice_pkg_val_buf(state->buf_table->buf_array +
				       state->buf_idx);
	else
		return NULL;
}

/**
 * ice_pkg_advance_sect
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 *
 * This helper function will advance the section within the ice segment,
 * also advancing the buffer if needed.
 */
static bool
ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
{
	if (!ice_seg && !state->buf)
		return false;

	if (!ice_seg && state->buf)
		if (++state->sect_idx < le16_to_cpu(state->buf->section_count))
			return true;

	state->buf = ice_pkg_enum_buf(ice_seg, state);
	if (!state->buf)
		return false;

	/* start of new buffer, reset section index */
	state->sect_idx = 0;
	return true;
}

/**
 * ice_pkg_enum_section
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 * @sect_type: section type to enumerate
 *
 * This function will enumerate all the sections of a particular type in the
 * ice segment. The first call is made with the ice_seg parameter non-NULL;
 * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
 * When the function returns a NULL pointer, then the end of the matching
 * sections has been reached.
 */
static void *
ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
		     u32 sect_type)
{
	u16 offset, size;

	if (ice_seg)
		state->type = sect_type;

	if (!ice_pkg_advance_sect(ice_seg, state))
		return NULL;

	/* scan for next matching section */
	while (state->buf->section_entry[state->sect_idx].type !=
	       cpu_to_le32(state->type))
		if (!ice_pkg_advance_sect(NULL, state))
			return NULL;

	/* validate section */
	offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
	if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
		return NULL;

	size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size);
	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
		return NULL;

	/* make sure the section fits in the buffer */
	if (offset + size > ICE_PKG_BUF_SIZE)
		return NULL;

	state->sect_type =
		le32_to_cpu(state->buf->section_entry[state->sect_idx].type);

	/* calc pointer to this section */
	state->sect = ((u8 *)state->buf) +
		le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);

	return state->sect;
}

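/* Usage sketch (illustrative only, not part of the driver): walk every
 * section of one type. The first call passes a non-NULL segment pointer;
 * every later call passes NULL so the enumeration continues from the state
 * saved in 'state':
 *
 *	struct ice_pkg_enum state;
 *	void *sect;
 *
 *	memset(&state, 0, sizeof(state));
 *	sect = ice_pkg_enum_section(seg, &state, ICE_SID_RXPARSER_BOOST_TCAM);
 *	while (sect) {
 *		... use the section pointed to by state->sect ...
 *		sect = ice_pkg_enum_section(NULL, &state, 0);
 *	}
 */
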
/**
 * ice_pkg_enum_entry
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 * @sect_type: section type to enumerate
 * @offset: pointer to variable that receives the offset in the table (optional)
 * @handler: function that handles access to the entries into the section type
 *
 * This function will enumerate all the entries of a particular section type in
 * the ice segment. The first call is made with the ice_seg parameter non-NULL;
 * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
 * When the function returns a NULL pointer, then the end of the entries has
 * been reached.
 *
 * Since each section may have a different header and entry size, the handler
 * function is needed to determine the number and location of entries in each
 * section.
 *
 * The offset parameter is optional, but should be used for sections that
 * contain an offset for each section table. For such cases, the section handler
 * function must return the appropriate offset + index to give the absolute
 * offset for each entry. For example, if the base for a section's header
 * indicates a base offset of 10, and the index for the entry is 2, then the
 * section handler function should set the offset to 10 + 2 = 12.
 */
static void *
ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
		   u32 sect_type, u32 *offset,
		   void *(*handler)(u32 sect_type, void *section,
				    u32 index, u32 *offset))
{
	void *entry;

	if (ice_seg) {
		if (!handler)
			return NULL;

		if (!ice_pkg_enum_section(ice_seg, state, sect_type))
			return NULL;

		state->entry_idx = 0;
		state->handler = handler;
	} else {
		state->entry_idx++;
	}

	if (!state->handler)
		return NULL;

	/* get entry */
	entry = state->handler(state->sect_type, state->sect, state->entry_idx,
			       offset);
	if (!entry) {
		/* end of a section, look for another section of this type */
		if (!ice_pkg_enum_section(NULL, state, 0))
			return NULL;

		state->entry_idx = 0;
		entry = state->handler(state->sect_type, state->sect,
				       state->entry_idx, offset);
	}

	return entry;
}

/**
 * ice_boost_tcam_handler
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the boost TCAM entry to be returned
 * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * Handles enumeration of individual boost TCAM entries.
 */
static void *
ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
{
	struct ice_boost_tcam_section *boost;

	if (!section)
		return NULL;

	if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
		return NULL;

	/* cppcheck-suppress nullPointer */
	if (index > ICE_MAX_BST_TCAMS_IN_BUF)
		return NULL;

	if (offset)
		*offset = 0;

	boost = section;
	if (index >= le16_to_cpu(boost->count))
		return NULL;

	return boost->tcam + index;
}

/**
 * ice_find_boost_entry
 * @ice_seg: pointer to the ice segment (non-NULL)
 * @addr: Boost TCAM address of entry to search for
 * @entry: returns pointer to the entry
 *
 * Finds a particular Boost TCAM entry and returns a pointer to that entry
 * if it is found. The ice_seg parameter must not be NULL since the first call
 * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
 */
static enum ice_status
ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
		     struct ice_boost_tcam_entry **entry)
{
	struct ice_boost_tcam_entry *tcam;
	struct ice_pkg_enum state;

	memset(&state, 0, sizeof(state));

	if (!ice_seg)
		return ICE_ERR_PARAM;

	do {
		tcam = ice_pkg_enum_entry(ice_seg, &state,
					  ICE_SID_RXPARSER_BOOST_TCAM, NULL,
					  ice_boost_tcam_handler);
		if (tcam && le16_to_cpu(tcam->addr) == addr) {
			*entry = tcam;
			return 0;
		}

		ice_seg = NULL;
	} while (tcam);

	*entry = NULL;
	return ICE_ERR_CFG;
}

/**
 * ice_label_enum_handler
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the label entry to be returned
 * @offset: pointer to receive absolute offset, always zero for label sections
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * Handles enumeration of individual label entries.
 */
static void *
ice_label_enum_handler(u32 __always_unused sect_type, void *section, u32 index,
		       u32 *offset)
{
	struct ice_label_section *labels;

	if (!section)
		return NULL;

	/* cppcheck-suppress nullPointer */
	if (index > ICE_MAX_LABELS_IN_BUF)
		return NULL;

	if (offset)
		*offset = 0;

	labels = section;
	if (index >= le16_to_cpu(labels->count))
		return NULL;

	return labels->label + index;
}

/**
 * ice_enum_labels
 * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
 * @type: the section type that will contain the label (0 on subsequent calls)
 * @state: ice_pkg_enum structure that will hold the state of the enumeration
 * @value: pointer to a value that will return the label's value if found
 *
 * Enumerates a list of labels in the package. The caller will call
 * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
 * ice_enum_labels(NULL, 0, ...) to continue. When the function returns NULL,
 * the end of the list has been reached.
 */
static char *
ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
		u16 *value)
{
	struct ice_label *label;

	/* Check for valid label section on first call */
	if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
		return NULL;

	label = ice_pkg_enum_entry(ice_seg, state, type, NULL,
				   ice_label_enum_handler);
	if (!label)
		return NULL;

	*value = le16_to_cpu(label->value);
	return label->name;
}

/**
 * ice_init_pkg_hints
 * @hw: pointer to the HW structure
 * @ice_seg: pointer to the segment of the package scan (non-NULL)
 *
 * This function will scan the package and save off relevant information
 * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
 * since the first call to ice_enum_labels requires a pointer to an actual
 * ice_seg structure.
 */
static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
{
	struct ice_pkg_enum state;
	char *label_name;
	u16 val;
	int i;

	memset(&hw->tnl, 0, sizeof(hw->tnl));
	memset(&state, 0, sizeof(state));

	if (!ice_seg)
		return;

	label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
				     &val);

	while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
		for (i = 0; tnls[i].type != TNL_LAST; i++) {
			size_t len = strlen(tnls[i].label_prefix);

			/* Look for matching label start, before continuing */
			if (strncmp(label_name, tnls[i].label_prefix, len))
				continue;

			/* Make sure this label matches our PF. Note that the PF
			 * character ('0' - '7') will be located where our
			 * prefix string's null terminator is located.
			 */
			if ((label_name[len] - '0') == hw->pf_id) {
				hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
				hw->tnl.tbl[hw->tnl.count].valid = false;
				hw->tnl.tbl[hw->tnl.count].boost_addr = val;
				hw->tnl.tbl[hw->tnl.count].port = 0;
				hw->tnl.count++;
				break;
			}
		}

		label_name = ice_enum_labels(NULL, 0, &state, &val);
	}

	/* Cache the appropriate boost TCAM entry pointers */
	for (i = 0; i < hw->tnl.count; i++) {
		ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
				     &hw->tnl.tbl[i].boost_entry);
		if (hw->tnl.tbl[i].boost_entry) {
			hw->tnl.tbl[i].valid = true;
			if (hw->tnl.tbl[i].type < __TNL_TYPE_CNT)
				hw->tnl.valid_count[hw->tnl.tbl[i].type]++;
		}
	}
}

/* Key creation */

#define ICE_DC_KEY	0x1	/* don't care */
#define ICE_DC_KEYINV	0x1
#define ICE_NM_KEY	0x0	/* never match */
#define ICE_NM_KEYINV	0x0
#define ICE_0_KEY	0x1	/* match 0 */
#define ICE_0_KEYINV	0x0
#define ICE_1_KEY	0x0	/* match 1 */
#define ICE_1_KEYINV	0x1

/**
 * ice_gen_key_word - generate 16-bits of a key/mask word
 * @val: the value
 * @valid: valid bits mask (change only the valid bits)
 * @dont_care: don't care mask
 * @nvr_mtch: never match mask
 * @key: pointer to an array of where the resulting key portion
 * @key_inv: pointer to an array of where the resulting key invert portion
 *
 * This function generates 16-bits from an 8-bit value, an 8-bit don't care
 * mask and an 8-bit never match mask. The 16-bits of output are divided into
 * 8 bits of key and 8 bits of key invert.
 *
 *     '0' = b01, always match a 0 bit
 *     '1' = b10, always match a 1 bit
 *     '?' = b11, don't care bit (always matches)
 *     '~' = b00, never match bit
 *
 * Input:
 *          val:         b0  1  0  1  0  1
 *          dont_care:   b0  0  1  1  0  0
 *          never_mtch:  b0  0  0  0  1  1
 *          ------------------------------
 * Result:  key:        b01 10 11 11 00 00
 */
static enum ice_status
ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
		 u8 *key_inv)
{
	u8 in_key = *key, in_key_inv = *key_inv;
	u8 i;

	/* 'dont_care' and 'nvr_mtch' masks cannot overlap */
	if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
		return ICE_ERR_CFG;

	*key = 0;
	*key_inv = 0;

	/* encode the 8 bits into 8-bit key and 8-bit key invert */
	for (i = 0; i < 8; i++) {
		*key >>= 1;
		*key_inv >>= 1;

		if (!(valid & 0x1)) { /* change only valid bits */
			*key |= (in_key & 0x1) << 7;
			*key_inv |= (in_key_inv & 0x1) << 7;
		} else if (dont_care & 0x1) { /* don't care bit */
			*key |= ICE_DC_KEY << 7;
			*key_inv |= ICE_DC_KEYINV << 7;
		} else if (nvr_mtch & 0x1) { /* never match bit */
			*key |= ICE_NM_KEY << 7;
			*key_inv |= ICE_NM_KEYINV << 7;
		} else if (val & 0x01) { /* exact 1 match */
			*key |= ICE_1_KEY << 7;
			*key_inv |= ICE_1_KEYINV << 7;
		} else { /* exact 0 match */
			*key |= ICE_0_KEY << 7;
			*key_inv |= ICE_0_KEYINV << 7;
		}

		dont_care >>= 1;
		nvr_mtch >>= 1;
		valid >>= 1;
		val >>= 1;
		in_key >>= 1;
		in_key_inv >>= 1;
	}

	return 0;
}

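/* Worked example (follows directly from the encoding above): with all bits
 * valid and no don't care or never match bits, each input bit b encodes to
 * key bit !b and key invert bit b. So val = 0xA5 (b10100101) produces
 * *key = 0x5A and *key_inv = 0xA5.
 */
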
/**
 * ice_bits_max_set - determine if the number of bits set is within a maximum
 * @mask: pointer to the byte array which is the mask
 * @size: the number of bytes in the mask
 * @max: the max number of set bits
 *
 * This function determines if there are at most 'max' number of bits set in an
 * array. Returns true if the number of bits set is <= max, otherwise false.
 */
static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
{
	u16 count = 0;
	u16 i;

	/* check each byte */
	for (i = 0; i < size; i++) {
		/* if 0, go to next byte */
		if (!mask[i])
			continue;

		/* We know there is at least one set bit in this byte because of
		 * the above check; if we already have found 'max' number of
		 * bits set, then we can return failure now.
		 */
		if (count == max)
			return false;

		/* count the bits in this byte, checking threshold */
		count += hweight8(mask[i]);
		if (count > max)
			return false;
	}

	return true;
}

/**
 * ice_set_key - generate a variable sized key with multiples of 16-bits
 * @key: pointer to where the key will be stored
 * @size: the size of the complete key in bytes (must be even)
 * @val: array of 8-bit values that makes up the value portion of the key
 * @upd: array of 8-bit masks that determine what key portion to update
 * @dc: array of 8-bit masks that make up the don't care mask
 * @nm: array of 8-bit masks that make up the never match mask
 * @off: the offset of the first byte in the key to update
 * @len: the number of bytes in the key update
 *
 * This function generates a key from a value, a don't care mask and a never
 * match mask.
 * upd, dc, and nm are optional parameters, and can be NULL:
 *	upd == NULL --> upd mask is all 1's (update all bits)
 *	dc == NULL --> dc mask is all 0's (no don't care bits)
 *	nm == NULL --> nm mask is all 0's (no never match bits)
 */
static enum ice_status
ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
	    u16 len)
{
	u16 half_size;
	u16 i;

	/* size must be a multiple of 2 bytes. */
	if (size % 2)
		return ICE_ERR_CFG;

	half_size = size / 2;
	if (off + len > half_size)
		return ICE_ERR_CFG;

	/* Make sure at most one bit is set in the never match mask. Having more
	 * than one never match mask bit set will cause HW to consume excessive
	 * power otherwise; this is a power management efficiency check.
	 */
#define ICE_NVR_MTCH_BITS_MAX	1
	if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
		return ICE_ERR_CFG;

	for (i = 0; i < len; i++)
		if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
				     dc ? dc[i] : 0, nm ? nm[i] : 0,
				     key + off + i, key + half_size + off + i))
			return ICE_ERR_CFG;

	return 0;
}

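/* Usage sketch (illustrative): write two value bytes into a 20-byte key
 * (10 bytes of key followed by 10 bytes of key invert), starting at byte 3
 * and updating every bit:
 *
 *	u8 key[20] = { 0 };
 *	u8 val[2] = { 0x12, 0x34 };
 *
 *	if (ice_set_key(key, sizeof(key), val, NULL, NULL, NULL, 3,
 *			sizeof(val)))
 *		... handle ICE_ERR_CFG ...
 */
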
/**
 * ice_acquire_global_cfg_lock
 * @hw: pointer to the HW structure
 * @access: access type (read or write)
 *
 * This function will request ownership of the global config lock for reading
 * or writing of the package. When attempting to obtain write access, the
 * caller must check for the following two return values:
 *
 * ICE_SUCCESS        - Means the caller has acquired the global config lock
 *                      and can perform writing of the package.
 * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
 *                      package or has found that no update was necessary; in
 *                      this case, the caller can just skip performing any
 *                      update of the package.
 */
static enum ice_status
ice_acquire_global_cfg_lock(struct ice_hw *hw,
			    enum ice_aq_res_access_type access)
{
	enum ice_status status;

	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);

	if (!status)
		mutex_lock(&ice_global_cfg_lock_sw);
	else if (status == ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");

	return status;
}

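/* Usage sketch (illustrative): when acquiring for write, ICE_ERR_AQ_NO_WORK
 * means another PF already downloaded the package (or no update was needed),
 * so the caller should skip the download rather than treat it as a failure:
 *
 *	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
 *	if (!status)
 *		... download, then ice_release_global_cfg_lock(hw) ...
 *	else if (status == ICE_ERR_AQ_NO_WORK)
 *		... package already present; treat as success ...
 */
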
/**
 * ice_release_global_cfg_lock
 * @hw: pointer to the HW structure
 *
 * This function will release the global config lock.
 */
static void ice_release_global_cfg_lock(struct ice_hw *hw)
{
	mutex_unlock(&ice_global_cfg_lock_sw);
	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
}

/**
 * ice_acquire_change_lock
 * @hw: pointer to the HW structure
 * @access: access type (read or write)
 *
 * This function will request ownership of the change lock.
 */
static enum ice_status
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
	return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
			       ICE_CHANGE_LOCK_TIMEOUT);
}

/**
 * ice_release_change_lock
 * @hw: pointer to the HW structure
 *
 * This function will release the change lock using the proper Admin Command.
 */
static void ice_release_change_lock(struct ice_hw *hw)
{
	ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
}

/**
 * ice_aq_download_pkg
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package buffer to transfer
 * @buf_size: the size of the package buffer
 * @last_buf: last buffer indicator
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cd: pointer to command details structure or NULL
 *
 * Download Package (0x0C40)
 */
static enum ice_status
ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
		    u16 buf_size, bool last_buf, u32 *error_offset,
		    u32 *error_info, struct ice_sq_cd *cd)
{
	struct ice_aqc_download_pkg *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	if (error_offset)
		*error_offset = 0;
	if (error_info)
		*error_info = 0;

	cmd = &desc.params.download_pkg;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	if (last_buf)
		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;

	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
	if (status == ICE_ERR_AQ_ERROR) {
		/* Read error from buffer only when the FW returned an error */
		struct ice_aqc_download_pkg_resp *resp;

		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
		if (error_offset)
			*error_offset = le32_to_cpu(resp->error_offset);
		if (error_info)
			*error_info = le32_to_cpu(resp->error_info);
	}

	return status;
}

/**
 * ice_aq_update_pkg
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package cmd buffer
 * @buf_size: the size of the package cmd buffer
 * @last_buf: last buffer indicator
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cd: pointer to command details structure or NULL
 *
 * Update Package (0x0C42)
 */
static enum ice_status
ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
		  bool last_buf, u32 *error_offset, u32 *error_info,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_download_pkg *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	if (error_offset)
		*error_offset = 0;
	if (error_info)
		*error_info = 0;

	cmd = &desc.params.download_pkg;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	if (last_buf)
		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;

	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
	if (status == ICE_ERR_AQ_ERROR) {
		/* Read error from buffer only when the FW returned an error */
		struct ice_aqc_download_pkg_resp *resp;

		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
		if (error_offset)
			*error_offset = le32_to_cpu(resp->error_offset);
		if (error_info)
			*error_info = le32_to_cpu(resp->error_info);
	}

	return status;
}

/**
 * ice_find_seg_in_pkg
 * @hw: pointer to the hardware structure
 * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK)
 * @pkg_hdr: pointer to the package header to be searched
 *
 * This function searches a package file for a particular segment type. On
 * success it returns a pointer to the segment header, otherwise it will
 * return NULL.
 */
static struct ice_generic_seg_hdr *
ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
		    struct ice_pkg_hdr *pkg_hdr)
{
	u32 i;

	ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
		  pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
		  pkg_hdr->pkg_format_ver.update,
		  pkg_hdr->pkg_format_ver.draft);

	/* Search all package segments for the requested segment type */
	for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
		struct ice_generic_seg_hdr *seg;

		seg = (struct ice_generic_seg_hdr *)
			((u8 *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i]));

		if (le32_to_cpu(seg->seg_type) == seg_type)
			return seg;
	}

	return NULL;
}

/**
 * ice_update_pkg
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 *
 * Obtains change lock and updates package.
 */
static enum ice_status
ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
	enum ice_status status;
	u32 offset, info, i;

	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
	if (status)
		return status;

	for (i = 0; i < count; i++) {
		struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
		bool last = ((i + 1) == count);

		status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end),
					   last, &offset, &info, NULL);

		if (status) {
			ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n",
				  status, offset, info);
			break;
		}
	}

	ice_release_change_lock(hw);

	return status;
}

/**
 * ice_dwnld_cfg_bufs
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 *
 * Obtains global config lock and downloads the package configuration buffers
 * to the firmware. Metadata buffers are skipped, and the first metadata buffer
 * found indicates that the rest of the buffers are all metadata buffers.
 */
static enum ice_status
ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
	enum ice_status status;
	struct ice_buf_hdr *bh;
	u32 offset, info, i;

	if (!bufs || !count)
		return ICE_ERR_PARAM;

	/* If the first buffer's first section has its metadata bit set
	 * then there are no buffers to be downloaded, and the operation is
	 * considered a success.
	 */
	bh = (struct ice_buf_hdr *)bufs;
	if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
		return 0;

	/* reset pkg_dwnld_status in case this function is called in the
	 * reset/rebuild flow
	 */
	hw->pkg_dwnld_status = ICE_AQ_RC_OK;

	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
	if (status) {
		if (status == ICE_ERR_AQ_NO_WORK)
			hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST;
		else
			hw->pkg_dwnld_status = hw->adminq.sq_last_status;
		return status;
	}

	for (i = 0; i < count; i++) {
		bool last = ((i + 1) == count);

		if (!last) {
			/* check next buffer for metadata flag */
			bh = (struct ice_buf_hdr *)(bufs + i + 1);

			/* A set metadata flag in the next buffer will signal
			 * that the current buffer will be the last buffer
			 * downloaded
			 */
			if (le16_to_cpu(bh->section_count))
				if (le32_to_cpu(bh->section_entry[0].type) &
				    ICE_METADATA_BUF)
					last = true;
		}

		bh = (struct ice_buf_hdr *)(bufs + i);

		status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
					     &offset, &info, NULL);

		/* Save AQ status from download package */
		hw->pkg_dwnld_status = hw->adminq.sq_last_status;
		if (status) {
			ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
				  status, offset, info);
			break;
		}

		if (last)
			break;
	}

	ice_release_global_cfg_lock(hw);

	return status;
}

/**
 * ice_aq_get_pkg_info_list
 * @hw: pointer to the hardware structure
 * @pkg_info: the buffer which will receive the information list
 * @buf_size: the size of the pkg_info information buffer
 * @cd: pointer to command details structure or NULL
 *
 * Get Package Info List (0x0C43)
 */
static enum ice_status
ice_aq_get_pkg_info_list(struct ice_hw *hw,
			 struct ice_aqc_get_pkg_info_resp *pkg_info,
			 u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);

	return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
}

/**
 * ice_download_pkg
 * @hw: pointer to the hardware structure
 * @ice_seg: pointer to the segment of the package to be downloaded
 *
 * Handles the download of a complete package.
 */
static enum ice_status
ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
{
	struct ice_buf_table *ice_buf_tbl;

	ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
		  ice_seg->hdr.seg_format_ver.major,
		  ice_seg->hdr.seg_format_ver.minor,
		  ice_seg->hdr.seg_format_ver.update,
		  ice_seg->hdr.seg_format_ver.draft);

	ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
		  le32_to_cpu(ice_seg->hdr.seg_type),
		  le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);

	ice_buf_tbl = ice_find_buf_table(ice_seg);

	ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
		  le32_to_cpu(ice_buf_tbl->buf_count));

	return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
				  le32_to_cpu(ice_buf_tbl->buf_count));
}

/**
 * ice_init_pkg_info
 * @hw: pointer to the hardware structure
 * @pkg_hdr: pointer to the driver's package hdr
 *
 * Saves off the package details into the HW structure.
 */
static enum ice_status
ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
	struct ice_generic_seg_hdr *seg_hdr;

	if (!pkg_hdr)
		return ICE_ERR_PARAM;

	seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
	if (seg_hdr) {
		struct ice_meta_sect *meta;
		struct ice_pkg_enum state;

		memset(&state, 0, sizeof(state));

		/* Get package information from the Metadata Section */
		meta = ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
					    ICE_SID_METADATA);
		if (!meta) {
			ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n");
			return ICE_ERR_CFG;
		}

		hw->pkg_ver = meta->ver;
		memcpy(hw->pkg_name, meta->name, sizeof(meta->name));

		ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
			  meta->ver.major, meta->ver.minor, meta->ver.update,
			  meta->ver.draft, meta->name);

		hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
		memcpy(hw->ice_seg_id, seg_hdr->seg_id,
		       sizeof(hw->ice_seg_id));

		ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
			  seg_hdr->seg_format_ver.major,
			  seg_hdr->seg_format_ver.minor,
			  seg_hdr->seg_format_ver.update,
			  seg_hdr->seg_format_ver.draft,
			  seg_hdr->seg_id);
	} else {
		ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n");
		return ICE_ERR_CFG;
	}

	return 0;
}

/**
 * ice_get_pkg_info
 * @hw: pointer to the hardware structure
 *
 * Store details of the package currently loaded in HW into the HW structure.
 */
static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
{
	struct ice_aqc_get_pkg_info_resp *pkg_info;
	enum ice_status status;
	u16 size;
	u32 i;

	size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
	pkg_info = kzalloc(size, GFP_KERNEL);
	if (!pkg_info)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
	if (status)
		goto init_pkg_free_alloc;

	for (i = 0; i < le32_to_cpu(pkg_info->count); i++) {
#define ICE_PKG_FLAG_COUNT	4
		char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
		u8 place = 0;

		if (pkg_info->pkg_info[i].is_active) {
			flags[place++] = 'A';
			hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
			hw->active_track_id =
				le32_to_cpu(pkg_info->pkg_info[i].track_id);
			memcpy(hw->active_pkg_name,
			       pkg_info->pkg_info[i].name,
			       sizeof(pkg_info->pkg_info[i].name));
			hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
		}
		if (pkg_info->pkg_info[i].is_active_at_boot)
			flags[place++] = 'B';
		if (pkg_info->pkg_info[i].is_modified)
			flags[place++] = 'M';
		if (pkg_info->pkg_info[i].is_in_nvm)
			flags[place++] = 'N';

		ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
			  i, pkg_info->pkg_info[i].ver.major,
			  pkg_info->pkg_info[i].ver.minor,
			  pkg_info->pkg_info[i].ver.update,
			  pkg_info->pkg_info[i].ver.draft,
			  pkg_info->pkg_info[i].name, flags);
	}

init_pkg_free_alloc:
	kfree(pkg_info);

	return status;
}

/**
 * ice_verify_pkg - verify package
 * @pkg: pointer to the package buffer
 * @len: size of the package buffer
 *
 * Verifies various attributes of the package file, including length, format
 * version, and the requirement of at least one segment.
 */
static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
{
	u32 seg_count;
	u32 i;

	if (len < struct_size(pkg, seg_offset, 1))
		return ICE_ERR_BUF_TOO_SHORT;

	if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
	    pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
	    pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
	    pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
		return ICE_ERR_CFG;

	/* pkg must have at least one segment */
	seg_count = le32_to_cpu(pkg->seg_count);
	if (seg_count < 1)
		return ICE_ERR_CFG;

	/* make sure segment array fits in package length */
	if (len < struct_size(pkg, seg_offset, seg_count))
		return ICE_ERR_BUF_TOO_SHORT;

	/* all segments must fit within length */
	for (i = 0; i < seg_count; i++) {
		u32 off = le32_to_cpu(pkg->seg_offset[i]);
		struct ice_generic_seg_hdr *seg;

		/* segment header must fit */
		if (len < off + sizeof(*seg))
			return ICE_ERR_BUF_TOO_SHORT;

		seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);

		/* segment body must fit */
		if (len < off + le32_to_cpu(seg->seg_size))
			return ICE_ERR_BUF_TOO_SHORT;
	}

	return 0;
}

/**
 * ice_free_seg - free package segment pointer
 * @hw: pointer to the hardware structure
 *
 * Frees the package segment pointer in the proper manner, depending on if the
 * segment was allocated or just the passed in pointer was stored.
 */
void ice_free_seg(struct ice_hw *hw)
{
	if (hw->pkg_copy) {
		devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy);
		hw->pkg_copy = NULL;
		hw->pkg_size = 0;
	}
	hw->seg = NULL;
}

/**
 * ice_init_pkg_regs - initialize additional package registers
 * @hw: pointer to the hardware structure
 */
static void ice_init_pkg_regs(struct ice_hw *hw)
{
#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
#define ICE_SW_BLK_IDX	0

	/* setup Switch block input mask, which is 48-bits in two parts */
	wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
	wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
}

/**
 * ice_chk_pkg_version - check package version for compatibility with driver
 * @pkg_ver: pointer to a version structure to check
 *
 * Check to make sure that the package about to be downloaded is compatible with
 * the driver. To be compatible, the major and minor components of the package
 * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
 * definitions.
 */
static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
{
	if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
	    pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
		return ICE_ERR_NOT_SUPPORTED;

	return 0;
}

/**
 * ice_chk_pkg_compat
 * @hw: pointer to the hardware structure
 * @ospkg: pointer to the package hdr
 * @seg: pointer to the package segment hdr
 *
 * This function checks the package version compatibility with driver and NVM
 */
static enum ice_status
ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
		   struct ice_seg **seg)
{
	struct ice_aqc_get_pkg_info_resp *pkg;
	enum ice_status status;
	u16 size;
	u32 i;

	/* Check package version compatibility */
	status = ice_chk_pkg_version(&hw->pkg_ver);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
		return status;
	}

	/* find ICE segment in given package */
	*seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE,
						     ospkg);
	if (!*seg) {
		ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
		return ICE_ERR_CFG;
	}

	/* Check if FW is compatible with the OS package */
	size = struct_size(pkg, pkg_info, ICE_PKG_CNT);
	pkg = kzalloc(size, GFP_KERNEL);
	if (!pkg)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL);
	if (status)
		goto fw_ddp_compat_free_alloc;

	for (i = 0; i < le32_to_cpu(pkg->count); i++) {
		/* loop till we find the NVM package */
		if (!pkg->pkg_info[i].is_in_nvm)
			continue;
		if ((*seg)->hdr.seg_format_ver.major !=
		    pkg->pkg_info[i].ver.major ||
		    (*seg)->hdr.seg_format_ver.minor >
		    pkg->pkg_info[i].ver.minor) {
			status = ICE_ERR_FW_DDP_MISMATCH;
			ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n");
		}
		/* done processing NVM package so break */
		break;
	}
fw_ddp_compat_free_alloc:
	kfree(pkg);
	return status;
}

/**
 * ice_init_pkg - initialize/download package
 * @hw: pointer to the hardware structure
 * @buf: pointer to the package buffer
 * @len: size of the package buffer
 *
 * This function initializes a package. The package contains HW tables
 * required to do packet processing. First, the function extracts package
 * information such as version. Then it finds the ice configuration segment
 * within the package; this function then saves a copy of the segment pointer
 * within the supplied package buffer. Next, the function will cache any hints
 * from the package, followed by downloading the package itself. Note that if
 * a previous PF driver has already downloaded the package successfully, then
 * the current driver will not have to download the package again.
 *
 * The local package contents will be used to query default behavior and to
 * update specific sections of the HW's version of the package (e.g. to update
 * the parse graph to understand new protocols).
 *
 * This function stores a pointer to the package buffer memory, and it is
 * expected that the supplied buffer will not be freed immediately. If the
 * package buffer needs to be freed, such as when read from a file, use
 * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
 * case.
 */
enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
{
	struct ice_pkg_hdr *pkg;
	enum ice_status status;
	struct ice_seg *seg;

	if (!buf || !len)
		return ICE_ERR_PARAM;

	pkg = (struct ice_pkg_hdr *)buf;
	status = ice_verify_pkg(pkg, len);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
			  status);
		return status;
	}

	/* initialize package info */
	status = ice_init_pkg_info(hw, pkg);
	if (status)
		goto init_pkg_err;

	/* before downloading the package, check package version for
	 * compatibility with driver
	 */
	status = ice_chk_pkg_compat(hw, pkg, &seg);
	if (status)
		goto init_pkg_err;

	/* initialize package hints and then download package */
	ice_init_pkg_hints(hw, seg);
	status = ice_download_pkg(hw, seg);
	if (status == ICE_ERR_AQ_NO_WORK) {
		ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n");
		status = 0;
	}

	/* Get information on the package currently loaded in HW, then make sure
	 * the driver is compatible with this version.
	 */
	if (!status) {
		status = ice_get_pkg_info(hw);
		if (!status)
			status = ice_chk_pkg_version(&hw->active_pkg_ver);
	}

	if (!status) {
		hw->seg = seg;
		/* on successful package download update other required
		 * registers to support the package and fill HW tables
		 * with package content.
		 */
		ice_init_pkg_regs(hw);
		ice_fill_blk_tbls(hw);
	} else {
		ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
			  status);
	}

init_pkg_err:
	return status;
}

/**
 * ice_copy_and_init_pkg - initialize/download a copy of the package
 * @hw: pointer to the hardware structure
 * @buf: pointer to the package buffer
 * @len: size of the package buffer
 *
 * This function copies the package buffer, and then calls ice_init_pkg() to
 * initialize the copied package contents.
 *
 * The copying is necessary if the package buffer supplied is constant, or if
 * the memory may disappear shortly after calling this function.
 *
 * If the package buffer resides in the data segment and can be modified, the
 * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
 *
 * However, if the package buffer needs to be copied first, such as when being
 * read from a file, the caller should use ice_copy_and_init_pkg().
 *
 * This function will first copy the package buffer, before calling
 * ice_init_pkg(). The caller is free to immediately destroy the original
 * package buffer, as the new copy will be managed by this function and
 * related routines.
 */
enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
{
	enum ice_status status;
	u8 *buf_copy;

	if (!buf || !len)
		return ICE_ERR_PARAM;

	buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);

	status = ice_init_pkg(hw, buf_copy, len);
	if (status) {
		/* Free the copy, since we failed to initialize the package */
		devm_kfree(ice_hw_to_dev(hw), buf_copy);
	} else {
		/* Track the copied pkg so we can free it later */
		hw->pkg_copy = buf_copy;
		hw->pkg_size = len;
	}

	return status;
}

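/* Usage sketch (illustrative; the driver's real flow lives in the PF init
 * path, and the file name is shown only as an example): a DDP image obtained
 * via request_firmware() is owned by the firmware loader, so the copying
 * variant must be used, and the image can be released right after the call:
 *
 *	const struct firmware *fw;
 *
 *	if (!request_firmware(&fw, "intel/ice/ddp/ice.pkg",
 *			      ice_hw_to_dev(hw))) {
 *		ice_copy_and_init_pkg(hw, fw->data, fw->size);
 *		release_firmware(fw);
 *	}
 */
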
/**
 * ice_pkg_buf_alloc
 * @hw: pointer to the HW structure
 *
 * Allocates a package buffer and returns a pointer to the buffer header.
 * Note: all package contents must be in Little Endian form.
 */
static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
{
	struct ice_buf_build *bld;
	struct ice_buf_hdr *buf;

	bld = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*bld), GFP_KERNEL);
	if (!bld)
		return NULL;

	buf = (struct ice_buf_hdr *)bld;
	buf->data_end = cpu_to_le16(offsetof(struct ice_buf_hdr,
					     section_entry));
	return bld;
}

/**
 * ice_pkg_buf_free
 * @hw: pointer to the HW structure
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 *
 * Frees a package buffer
 */
static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
{
	devm_kfree(ice_hw_to_dev(hw), bld);
}

/**
 * ice_pkg_buf_reserve_section
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 * @count: the number of sections to reserve
 *
 * Reserves one or more section table entries in a package buffer. This routine
 * can be called multiple times as long as they are made before calling
 * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has been
 * called, the number of reserved sections can no longer be increased; not
 * using all reserved sections is fine, but this will result in some wasted
 * space in the buffer.
 * Note: all package contents must be in Little Endian form.
 */
static enum ice_status
ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
{
	struct ice_buf_hdr *buf;
	u16 section_count;
	u16 data_end;

	if (!bld)
		return ICE_ERR_PARAM;

	buf = (struct ice_buf_hdr *)&bld->buf;

	/* already an active section, can't increase table size */
	section_count = le16_to_cpu(buf->section_count);
	if (section_count > 0)
		return ICE_ERR_CFG;

	if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
		return ICE_ERR_CFG;
	bld->reserved_section_table_entries += count;

	data_end = le16_to_cpu(buf->data_end) +
		flex_array_size(buf, section_entry, count);
	buf->data_end = cpu_to_le16(data_end);

	return 0;
}

/**
 * ice_pkg_buf_alloc_section
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 * @type: the section type value
 * @size: the size of the section to reserve (in bytes)
 *
 * Reserves memory in the buffer for a section's content and updates the
 * buffers' status accordingly. This routine returns a pointer to the first
 * byte of the section start within the buffer, which is used to fill in the
 * section contents.
 * Note: all package contents must be in Little Endian form.
 */
static void *
ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
{
	struct ice_buf_hdr *buf;
	u16 sect_count;
	u16 data_end;

	if (!bld || !type || !size)
		return NULL;

	buf = (struct ice_buf_hdr *)&bld->buf;

	/* check for enough space left in buffer */
	data_end = le16_to_cpu(buf->data_end);

	/* section start must align on 4 byte boundary */
	data_end = ALIGN(data_end, 4);

	if ((data_end + size) > ICE_MAX_S_DATA_END)
		return NULL;

	/* check for more available section table entries */
	sect_count = le16_to_cpu(buf->section_count);
	if (sect_count < bld->reserved_section_table_entries) {
		void *section_ptr = ((u8 *)buf) + data_end;

		buf->section_entry[sect_count].offset = cpu_to_le16(data_end);
		buf->section_entry[sect_count].size = cpu_to_le16(size);
		buf->section_entry[sect_count].type = cpu_to_le32(type);

		data_end += size;
		buf->data_end = cpu_to_le16(data_end);

		buf->section_count = cpu_to_le16(sect_count + 1);
		return section_ptr;
	}

	/* no free section table entries */
	return NULL;
}

/**
 * ice_pkg_buf_get_active_sections
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 *
 * Returns the number of active sections. Before using the package buffer
 * in an update package command, the caller should make sure that there is at
 * least one active section - otherwise, the buffer is not legal and should
 * not be used.
 * Note: all package contents must be in Little Endian form.
 */
static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
{
	struct ice_buf_hdr *buf;

	if (!bld)
		return 0;

	buf = (struct ice_buf_hdr *)&bld->buf;
	return le16_to_cpu(buf->section_count);
}

/**
 * ice_pkg_buf
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 *
 * Return a pointer to the buffer's header
 */
static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
{
	if (!bld)
		return NULL;

	return &bld->buf;
}

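/* Usage sketch (illustrative): the buffer build helpers are meant to be used
 * in a fixed sequence - allocate, reserve the section table, carve sections,
 * send, free - which is the pattern ice_create_tunnel() below follows:
 *
 *	bld = ice_pkg_buf_alloc(hw);
 *	ice_pkg_buf_reserve_section(bld, 2);
 *	sect = ice_pkg_buf_alloc_section(bld, sid, size);
 *	... fill *sect with Little Endian content ...
 *	if (ice_pkg_buf_get_active_sections(bld))
 *		ice_update_pkg(hw, ice_pkg_buf(bld), 1);
 *	ice_pkg_buf_free(hw, bld);
 */
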
/**
 * ice_get_open_tunnel_port - retrieve an open tunnel port
 * @hw: pointer to the HW structure
 * @port: returns open port
 */
bool
ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port)
{
	bool res = false;
	u16 i;

	mutex_lock(&hw->tnl_lock);

	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port) {
			*port = hw->tnl.tbl[i].port;
			res = true;
			break;
		}

	mutex_unlock(&hw->tnl_lock);

	return res;
}

/**
 * ice_tunnel_idx_to_entry - convert linear index to the sparse one
 * @hw: pointer to the HW structure
 * @type: type of tunnel
 * @idx: linear index
 *
 * Stack assumes we have 2 linear tables with indexes [0, count_valid),
 * but really the port table may be sparse, and types are mixed, so convert
 * the stack index into the device index.
 */
static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type,
				   u16 idx)
{
	u16 i;

	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
		if (hw->tnl.tbl[i].valid &&
		    hw->tnl.tbl[i].type == type &&
		    idx-- == 0)
			return i;

	WARN_ON_ONCE(1);
	return 0;
}

/**
 * ice_create_tunnel
 * @hw: pointer to the HW structure
 * @index: device table entry
 * @type: type of tunnel
 * @port: port of tunnel to create
 *
 * Create a tunnel by updating the parse graph in the parser. We do that by
 * creating a package buffer with the tunnel info and issuing an update package
 * command.
 */
static enum ice_status
ice_create_tunnel(struct ice_hw *hw, u16 index,
		  enum ice_tunnel_type type, u16 port)
{
	struct ice_boost_tcam_section *sect_rx, *sect_tx;
	enum ice_status status = ICE_ERR_MAX_LIMIT;
	struct ice_buf_build *bld;

	mutex_lock(&hw->tnl_lock);

	bld = ice_pkg_buf_alloc(hw);
	if (!bld) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_create_tunnel_end;
	}

	/* allocate 2 sections, one for Rx parser, one for Tx parser */
	if (ice_pkg_buf_reserve_section(bld, 2))
		goto ice_create_tunnel_err;

	sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
					    struct_size(sect_rx, tcam, 1));
	if (!sect_rx)
		goto ice_create_tunnel_err;
	sect_rx->count = cpu_to_le16(1);

	sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
					    struct_size(sect_tx, tcam, 1));
	if (!sect_tx)
		goto ice_create_tunnel_err;
	sect_tx->count = cpu_to_le16(1);

	/* copy original boost entry to update package buffer */
	memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
	       sizeof(*sect_rx->tcam));

	/* over-write the never-match dest port key bits with the encoded port
	 * bits
	 */
	ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
		    (u8 *)&port, NULL, NULL, NULL,
		    (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key),
		    sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));

	/* exact copy of entry to Tx section entry */
	memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam));

	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
	if (!status)
		hw->tnl.tbl[index].port = port;

ice_create_tunnel_err:
	ice_pkg_buf_free(hw, bld);

ice_create_tunnel_end:
	mutex_unlock(&hw->tnl_lock);

	return status;
}

/**
 * ice_destroy_tunnel
 * @hw: pointer to the HW structure
 * @index: device table entry
 * @type: type of tunnel
 * @port: port of tunnel to destroy
 *
 * Destroys a tunnel by creating an update package buffer targeting the
 * specific update requested and then performing an update package command.
 */
static enum ice_status
ice_destroy_tunnel(struct ice_hw *hw, u16 index, enum ice_tunnel_type type,
		   u16 port)
{
	struct ice_boost_tcam_section *sect_rx, *sect_tx;
	enum ice_status status = ICE_ERR_MAX_LIMIT;
	struct ice_buf_build *bld;

	mutex_lock(&hw->tnl_lock);

	if (WARN_ON(!hw->tnl.tbl[index].valid ||
		    hw->tnl.tbl[index].type != type ||
		    hw->tnl.tbl[index].port != port)) {
		status = ICE_ERR_OUT_OF_RANGE;
		goto ice_destroy_tunnel_end;
	}

	bld = ice_pkg_buf_alloc(hw);
	if (!bld) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_destroy_tunnel_end;
	}

	/* allocate 2 sections, one for Rx parser, one for Tx parser */
	if (ice_pkg_buf_reserve_section(bld, 2))
		goto ice_destroy_tunnel_err;

	sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
					    struct_size(sect_rx, tcam, 1));
	if (!sect_rx)
		goto ice_destroy_tunnel_err;
	sect_rx->count = cpu_to_le16(1);

	sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
					    struct_size(sect_tx, tcam, 1));
	if (!sect_tx)
		goto ice_destroy_tunnel_err;
	sect_tx->count = cpu_to_le16(1);

	/* copy original boost entry to update package buffer, one copy to Rx
	 * section, another copy to the Tx section
	 */
	memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
	       sizeof(*sect_rx->tcam));
	memcpy(sect_tx->tcam, hw->tnl.tbl[index].boost_entry,
	       sizeof(*sect_tx->tcam));

	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
	if (!status)
		hw->tnl.tbl[index].port = 0;

ice_destroy_tunnel_err:
	ice_pkg_buf_free(hw, bld);

ice_destroy_tunnel_end:
	mutex_unlock(&hw->tnl_lock);

	return status;
}

int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
			    unsigned int idx, struct udp_tunnel_info *ti)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	enum ice_tunnel_type tnl_type;
	enum ice_status status;
	u16 index;

	tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
	/* argument order fixed to match ice_tunnel_idx_to_entry(hw, type, idx) */
	index = ice_tunnel_idx_to_entry(&pf->hw, tnl_type, idx);

	status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port));
	if (status) {
		netdev_err(netdev, "Error adding UDP tunnel - %s\n",
			   ice_stat_str(status));
		return -EIO;
	}

	udp_tunnel_nic_set_port_priv(netdev, table, idx, index);
	return 0;
}

int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
			      unsigned int idx, struct udp_tunnel_info *ti)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	enum ice_tunnel_type tnl_type;
	enum ice_status status;

	tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;

	status = ice_destroy_tunnel(&pf->hw, ti->hw_priv, tnl_type,
				    ntohs(ti->port));
	if (status) {
		netdev_err(netdev, "Error removing UDP tunnel - %s\n",
			   ice_stat_str(status));
		return -EIO;
	}

	return 0;
}

/* PTG Management */

/**
 * ice_ptg_find_ptype - Search for packet type group using packet type (ptype)
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @ptype: the ptype to search for
 * @ptg: pointer to variable that receives the PTG
 *
 * This function will search the PTGs for a particular ptype, returning the
 * PTG ID that contains it through the PTG parameter, with the value of
 * ICE_DEFAULT_PTG (0) meaning it is part of the default PTG.
 */
static enum ice_status
ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
{
	if (ptype >= ICE_XLT1_CNT || !ptg)
		return ICE_ERR_PARAM;

	*ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
	return 0;
}

/**
 * ice_ptg_alloc_val - Allocates a new packet type group ID by value
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @ptg: the PTG to allocate
 *
 * This function allocates a given packet type group ID specified by the PTG
 * parameter.
 */
static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
{
	hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
}

/**
 * ice_ptg_remove_ptype - Removes ptype from a particular packet type group
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @ptype: the ptype to remove
 * @ptg: the PTG to remove the ptype from
 *
 * This function will remove the ptype from the specific PTG, and move it to
 * the default PTG (ICE_DEFAULT_PTG).
 */
static enum ice_status
ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
{
	struct ice_ptg_ptype **ch;
	struct ice_ptg_ptype *p;

	if (ptype > ICE_XLT1_CNT - 1)
		return ICE_ERR_PARAM;

	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
		return ICE_ERR_DOES_NOT_EXIST;

	/* Should not happen if .in_use is set, bad config */
	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
		return ICE_ERR_CFG;

	/* find the ptype within this PTG, and bypass the link over it */
	p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
	ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
	while (p) {
		if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
			*ch = p->next_ptype;
			break;
		}

		ch = &p->next_ptype;
		p = p->next_ptype;
	}

	hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
	hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;

	return 0;
}

/**
 * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @ptype: the ptype to add or move
 * @ptg: the PTG to add or move the ptype to
 *
 * This function will either add or move a ptype to a particular PTG depending
 * on if the ptype is already part of another group. Note that using a
 * destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
 * default PTG.
 */
static enum ice_status
ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
{
	enum ice_status status;
	u8 original_ptg;

	if (ptype > ICE_XLT1_CNT - 1)
		return ICE_ERR_PARAM;

	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
		return ICE_ERR_DOES_NOT_EXIST;

	status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
	if (status)
		return status;

	/* Is ptype already in the correct PTG? */
	if (original_ptg == ptg)
		return 0;

	/* Remove from original PTG and move back to the default PTG */
	if (original_ptg != ICE_DEFAULT_PTG)
		ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);

	/* Moving to default PTG? Then we're done with this request */
	if (ptg == ICE_DEFAULT_PTG)
		return 0;

	/* Add ptype to PTG at beginning of list */
	hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
		hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
	hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
		&hw->blk[blk].xlt1.ptypes[ptype];

	hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
	hw->blk[blk].xlt1.t[ptype] = ptg;

	return 0;
}

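/* Usage sketch (illustrative; ptype_a and ptype_b are hypothetical values):
 * group two ptypes under PTG 5 of the RSS block, then return one of them to
 * the default group:
 *
 *	ice_ptg_alloc_val(hw, ICE_BLK_RSS, 5);
 *	ice_ptg_add_mv_ptype(hw, ICE_BLK_RSS, ptype_a, 5);
 *	ice_ptg_add_mv_ptype(hw, ICE_BLK_RSS, ptype_b, 5);
 *	ice_ptg_add_mv_ptype(hw, ICE_BLK_RSS, ptype_b, ICE_DEFAULT_PTG);
 */
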
/* Block / table size info */
struct ice_blk_size_details {
	u16 xlt1;			/* # XLT1 entries */
	u16 xlt2;			/* # XLT2 entries */
	u16 prof_tcam;			/* # profile ID TCAM entries */
	u16 prof_id;			/* # profile IDs */
	u8 prof_cdid_bits;		/* # CDID one-hot bits used in key */
	u16 prof_redir;			/* # profile redirection entries */
	u16 es;				/* # extraction sequence entries */
	u16 fvw;			/* # field vector words */
	u8 overwrite;			/* overwrite existing entries allowed */
	u8 reverse;			/* reverse FV order */
};

static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = {
	/**
	 * Table Definitions
	 * XLT1 - Number of entries in XLT1 table
	 * XLT2 - Number of entries in XLT2 table
	 * TCAM - Number of entries Profile ID TCAM table
	 * CDID - Control Domain ID of the hardware block
	 * PRED - Number of entries in the Profile Redirection Table
	 * FV - Number of entries in the Field Vector
	 * FVW - Width (in WORDs) of the Field Vector
	 * OVR - Overwrite existing table entries
	 * REV - Reverse FV
	 */
	/*          XLT1        , XLT2        ,TCAM, PID,CDID,PRED,   FV, FVW */
	/*          Overwrite   , Reverse FV */
	/* SW  */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256,   0,  256, 256,  48,
		    false, false },
	/* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  32,
		    false, false },
	/* FD  */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  24,
		    false, true  },
	/* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  24,
		    true,  true  },
	/* PE  */ { ICE_XLT1_CNT, ICE_XLT2_CNT,  64,  32,   0,   32,  32,  24,
		    false, false },
};

enum ice_sid_all {
	ICE_SID_XLT1_OFF = 0,
	ICE_SID_XLT2_OFF,
	ICE_SID_PR_OFF,
	ICE_SID_PR_REDIR_OFF,
	ICE_SID_ES_OFF
};

/* Characteristic handling */

/**
 * ice_match_prop_lst - determine if properties of two lists match
 * @list1: first properties list
 * @list2: second properties list
 *
 * Count, cookies and the order must match in order to be considered equivalent.
 */
static bool
ice_match_prop_lst(struct list_head *list1, struct list_head *list2)
{
	struct ice_vsig_prof *tmp1;
	struct ice_vsig_prof *tmp2;
	u16 chk_count = 0;
	u16 count = 0;

	/* compare counts */
	list_for_each_entry(tmp1, list1, list)
		count++;
	list_for_each_entry(tmp2, list2, list)
		chk_count++;
	/* cppcheck-suppress knownConditionTrueFalse */
	if (!count || count != chk_count)
		return false;

	tmp1 = list_first_entry(list1, struct ice_vsig_prof, list);
	tmp2 = list_first_entry(list2, struct ice_vsig_prof, list);

	/* profile cookies must compare, and in the exact same order to take
	 * into account priority
	 */
	while (count--) {
		if (tmp2->profile_cookie != tmp1->profile_cookie)
			return false;

		tmp1 = list_next_entry(tmp1, list);
		tmp2 = list_next_entry(tmp2, list);
	}

	return true;
}

/* VSIG Management */

/**
 * ice_vsig_find_vsi - find a VSIG that contains a specified VSI
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @vsi: VSI of interest
 * @vsig: pointer to receive the VSI group
 *
 * This function will lookup the VSI entry in the XLT2 list and return
 * the VSI group it is associated with.
 */
static enum ice_status
ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
{
	if (!vsig || vsi >= ICE_MAX_VSI)
		return ICE_ERR_PARAM;

	/* As long as there's a default or valid VSIG associated with the input
	 * VSI, the function returns success. Any handling of VSIG will be
	 * done by the following add, update or remove functions.
	 */
	*vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;

	return 0;
}

/**
 * ice_vsig_alloc_val - allocate a new VSIG by value
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @vsig: the VSIG to allocate
 *
 * This function will allocate a given VSIG specified by the VSIG parameter.
 */
static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
{
	u16 idx = vsig & ICE_VSIG_IDX_M;

	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
		INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
		hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
	}

	return ICE_VSIG_VALUE(idx, hw->pf_id);
}

/**
 * ice_vsig_alloc - Finds a free entry and allocates a new VSIG
 * @hw: pointer to the hardware structure
 * @blk: HW block
 *
 * This function will iterate through the VSIG list and mark the first
 * unused entry for the new VSIG entry as used and return that value.
 */
static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
{
	u16 i;

	for (i = 1; i < ICE_MAX_VSIGS; i++)
		if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use)
			return ice_vsig_alloc_val(hw, blk, i);

	return ICE_DEFAULT_VSIG;
}

/**
 * ice_find_dup_props_vsig - find VSI group with a specified set of properties
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @chs: characteristic list
 * @vsig: returns the VSIG with the matching profiles, if found
 *
 * Each VSIG is associated with a characteristic set; i.e. all VSIs under
 * a group have the same characteristic set. To check if there exists a VSIG
 * which has the same characteristics as the input characteristics, this
 * function will iterate through the XLT2 list and return the VSIG that has a
 * matching configuration. In order to make sure that priorities are accounted
 * for, the list must match exactly, including the order in which the
 * characteristics are listed.
 */
static enum ice_status
ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
			struct list_head *chs, u16 *vsig)
{
	struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
	u16 i;

	for (i = 0; i < xlt2->count; i++)
		if (xlt2->vsig_tbl[i].in_use &&
		    ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
			*vsig = ICE_VSIG_VALUE(i, hw->pf_id);
			return 0;
		}

	return ICE_ERR_DOES_NOT_EXIST;
}

2192 * ice_vsig_free - free VSI group
2193 * @hw: pointer to the hardware structure
2195 * @vsig: VSIG to remove
2197 * The function will remove all VSIs associated with the input VSIG, move
2198 * them to the DEFAULT_VSIG, and mark the VSIG as available.
2200 static enum ice_status
2201 ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
2203 struct ice_vsig_prof *dtmp, *del;
2204 struct ice_vsig_vsi *vsi_cur;
2207 idx = vsig & ICE_VSIG_IDX_M;
2208 if (idx >= ICE_MAX_VSIGS)
2209 return ICE_ERR_PARAM;
2211 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2212 return ICE_ERR_DOES_NOT_EXIST;
2214 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
2216 vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2217 /* If the VSIG has at least 1 VSI then iterate through the
2218 * list and remove the VSIs before deleting the group.
2221 /* remove all VSIs associated with this VSIG XLT2 entry */
2223 struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
2225 vsi_cur->vsig = ICE_DEFAULT_VSIG;
2226 vsi_cur->changed = 1;
2227 vsi_cur->next_vsi = NULL;
2231 /* NULL terminate head of VSI list */
2232 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
2235 /* free characteristic list */
2236 list_for_each_entry_safe(del, dtmp,
2237 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
2239 list_del(&del->list);
2240 devm_kfree(ice_hw_to_dev(hw), del);
2243 /* if the VSIG characteristic list was cleared for reset,
2244 * re-initialize the list head
2246 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
2252 * ice_vsig_remove_vsi - remove VSI from VSIG
2253 * @hw: pointer to the hardware structure
2255 * @vsi: VSI to remove
2256 * @vsig: VSI group to remove from
2258 * The function will remove the input VSI from its VSI group and move it
2259 * to the DEFAULT_VSIG.
2261 static enum ice_status
2262 ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
2264 struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
2267 idx = vsig & ICE_VSIG_IDX_M;
2269 if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
2270 return ICE_ERR_PARAM;
2272 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2273 return ICE_ERR_DOES_NOT_EXIST;
2275 /* entry already in default VSIG, don't have to remove */
2276 if (idx == ICE_DEFAULT_VSIG)
2279 vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2283 vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
2284 vsi_cur = (*vsi_head);
2286 /* iterate the VSI list, skip over the entry to be removed */
2288 if (vsi_tgt == vsi_cur) {
2289 (*vsi_head) = vsi_cur->next_vsi;
2292 vsi_head = &vsi_cur->next_vsi;
2293 vsi_cur = vsi_cur->next_vsi;
2296 /* verify that the VSI was removed from the group list */
2298 return ICE_ERR_DOES_NOT_EXIST;
2300 vsi_cur->vsig = ICE_DEFAULT_VSIG;
2301 vsi_cur->changed = 1;
2302 vsi_cur->next_vsi = NULL;
2308 * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group
2309 * @hw: pointer to the hardware structure
2312 * @vsig: destination VSI group
2314 * This function will move or add the input VSI to the target VSIG.
2315 * The function will find the original VSIG the VSI belongs to and
2316 * move the entry to the DEFAULT_VSIG, update the original VSIG and
2317 * then move the entry to the new VSIG.
2319 static enum ice_status
2320 ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
2322 struct ice_vsig_vsi *tmp;
2323 enum ice_status status;
2326 idx = vsig & ICE_VSIG_IDX_M;
2328 if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
2329 return ICE_ERR_PARAM;
2331 /* if the VSIG is not in use and is not the default type, this VSIG
2332 * doesn't exist.
2334 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
2335 vsig != ICE_DEFAULT_VSIG)
2336 return ICE_ERR_DOES_NOT_EXIST;
2338 status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
2342 /* no update required if VSIGs match */
2343 if (orig_vsig == vsig)
2346 if (orig_vsig != ICE_DEFAULT_VSIG) {
2347 /* remove entry from orig_vsig and add to default VSIG */
2348 status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
2353 if (idx == ICE_DEFAULT_VSIG)
2356 /* Create VSI entry and add VSIG and prop_mask values */
2357 hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
2358 hw->blk[blk].xlt2.vsis[vsi].changed = 1;
2360 /* Add new entry to the head of the VSIG list */
2361 tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2362 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
2363 &hw->blk[blk].xlt2.vsis[vsi];
2364 hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
2365 hw->blk[blk].xlt2.t[vsi] = vsig;
2371 * ice_prof_has_mask_idx - determine if profile index masking is identical
2372 * @hw: pointer to the hardware structure
2374 * @prof: profile to check
2375 * @idx: profile index to check
2376 * @mask: mask to match
2379 ice_prof_has_mask_idx(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 idx,
2382 bool expect_no_mask = false;
2387 /* If mask is 0x0000 or 0xffff, then there is no masking */
2388 if (mask == 0 || mask == 0xffff)
2389 expect_no_mask = true;
2391 /* Scan the enabled masks on this profile, for the specified idx */
2392 for (i = hw->blk[blk].masks.first; i < hw->blk[blk].masks.first +
2393 hw->blk[blk].masks.count; i++)
2394 if (hw->blk[blk].es.mask_ena[prof] & BIT(i))
2395 if (hw->blk[blk].masks.masks[i].in_use &&
2396 hw->blk[blk].masks.masks[i].idx == idx) {
2398 if (hw->blk[blk].masks.masks[i].mask == mask)
2403 if (expect_no_mask) {
2415 * ice_prof_has_mask - determine if profile masking is identical
2416 * @hw: pointer to the hardware structure
2418 * @prof: profile to check
2419 * @masks: masks to match
2422 ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks)
2426 /* es->mask_ena[prof] will have the mask */
2427 for (i = 0; i < hw->blk[blk].es.fvw; i++)
2428 if (!ice_prof_has_mask_idx(hw, blk, prof, i, masks[i]))
2435 * ice_find_prof_id_with_mask - find profile ID for a given field vector
2436 * @hw: pointer to the hardware structure
2438 * @fv: field vector to search for
2439 * @masks: masks for FV
2440 * @prof_id: receives the profile ID
2442 static enum ice_status
2443 ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
2444 struct ice_fv_word *fv, u16 *masks, u8 *prof_id)
2446 struct ice_es *es = &hw->blk[blk].es;
2449 /* For FD, we don't want to reuse an existing profile with the same
2450 * field vector and mask, as this would cause rule interference.
2452 if (blk == ICE_BLK_FD)
2453 return ICE_ERR_DOES_NOT_EXIST;
2455 for (i = 0; i < (u8)es->count; i++) {
2456 u16 off = i * es->fvw;
2458 if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
2461 /* check if masks settings are the same for this profile */
2462 if (masks && !ice_prof_has_mask(hw, blk, i, masks))
2469 return ICE_ERR_DOES_NOT_EXIST;
2473 * ice_prof_id_rsrc_type - get profile ID resource type for a block type
2474 * @blk: the block type
2475 * @rsrc_type: pointer to variable to receive the resource type
2477 static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
2481 *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID;
2484 *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
2493 * ice_tcam_ent_rsrc_type - get TCAM entry resource type for a block type
2494 * @blk: the block type
2495 * @rsrc_type: pointer to variable to receive the resource type
2497 static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
2501 *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM;
2504 *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
2513 * ice_alloc_tcam_ent - allocate hardware TCAM entry
2514 * @hw: pointer to the HW struct
2515 * @blk: the block to allocate the TCAM for
2516 * @btm: true to allocate from bottom of table, false to allocate from top
2517 * @tcam_idx: pointer to variable to receive the TCAM entry
2519 * This function allocates a new entry in a Profile ID TCAM for a specific
2520 * block.
2522 static enum ice_status
2523 ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
2528 if (!ice_tcam_ent_rsrc_type(blk, &res_type))
2529 return ICE_ERR_PARAM;
2531 return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx);
2535 * ice_free_tcam_ent - free hardware TCAM entry
2536 * @hw: pointer to the HW struct
2537 * @blk: the block from which to free the TCAM entry
2538 * @tcam_idx: the TCAM entry to free
2540 * This function frees an entry in a Profile ID TCAM for a specific block.
2542 static enum ice_status
2543 ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
2547 if (!ice_tcam_ent_rsrc_type(blk, &res_type))
2548 return ICE_ERR_PARAM;
2550 return ice_free_hw_res(hw, res_type, 1, &tcam_idx);
2554 * ice_alloc_prof_id - allocate profile ID
2555 * @hw: pointer to the HW struct
2556 * @blk: the block to allocate the profile ID for
2557 * @prof_id: pointer to variable to receive the profile ID
2559 * This function allocates a new profile ID, which also corresponds to a Field
2560 * Vector (Extraction Sequence) entry.
2562 static enum ice_status
2563 ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
2565 enum ice_status status;
2569 if (!ice_prof_id_rsrc_type(blk, &res_type))
2570 return ICE_ERR_PARAM;
2572 status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof);
2574 *prof_id = (u8)get_prof;
2580 * ice_free_prof_id - free profile ID
2581 * @hw: pointer to the HW struct
2582 * @blk: the block from which to free the profile ID
2583 * @prof_id: the profile ID to free
2585 * This function frees a profile ID, which also corresponds to a Field Vector.
2587 static enum ice_status
2588 ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
2590 u16 tmp_prof_id = (u16)prof_id;
2593 if (!ice_prof_id_rsrc_type(blk, &res_type))
2594 return ICE_ERR_PARAM;
2596 return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id);
2600 * ice_prof_inc_ref - increment reference count for profile
2601 * @hw: pointer to the HW struct
2602 * @blk: the block containing the profile ID
2603 * @prof_id: the profile ID for which to increment the reference count
2605 static enum ice_status
2606 ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
2608 if (prof_id >= hw->blk[blk].es.count)
2609 return ICE_ERR_PARAM;
2611 hw->blk[blk].es.ref_count[prof_id]++;
2617 * ice_write_prof_mask_reg - write profile mask register
2618 * @hw: pointer to the HW struct
2619 * @blk: hardware block
2620 * @mask_idx: mask index
2621 * @idx: index of the FV which will use the mask
2622 * @mask: the 16-bit mask
2625 ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx,
2633 offset = GLQF_HMASK(mask_idx);
2634 val = (idx << GLQF_HMASK_MSK_INDEX_S) & GLQF_HMASK_MSK_INDEX_M;
2635 val |= (mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M;
2638 offset = GLQF_FDMASK(mask_idx);
2639 val = (idx << GLQF_FDMASK_MSK_INDEX_S) & GLQF_FDMASK_MSK_INDEX_M;
2640 val |= (mask << GLQF_FDMASK_MASK_S) & GLQF_FDMASK_MASK_M;
2643 ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
2648 wr32(hw, offset, val);
2649 ice_debug(hw, ICE_DBG_PKG, "write mask, blk %d (%d): %x = %x\n",
2650 blk, idx, offset, val);
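/* Worked example (editorial note, not part of the driver): for the RSS
 * block, programming mask slot 3 so that mask 0x00FF applies to FV word 2
 * follows exactly the path above:
 *
 *	val = (2 << GLQF_HMASK_MSK_INDEX_S) & GLQF_HMASK_MSK_INDEX_M;
 *	val |= (0x00FF << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M;
 *	wr32(hw, GLQF_HMASK(3), val);
 */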
2654 * ice_write_prof_mask_enable_res - write profile mask enable register
2655 * @hw: pointer to the HW struct
2656 * @blk: hardware block
2657 * @prof_id: profile ID
2658 * @enable_mask: enable mask
2661 ice_write_prof_mask_enable_res(struct ice_hw *hw, enum ice_block blk,
2662 u16 prof_id, u32 enable_mask)
2668 offset = GLQF_HMASK_SEL(prof_id);
2671 offset = GLQF_FDMASK_SEL(prof_id);
2674 ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
2679 wr32(hw, offset, enable_mask);
2680 ice_debug(hw, ICE_DBG_PKG, "write mask enable, blk %d (%d): %x = %x\n",
2681 blk, prof_id, offset, enable_mask);
2685 * ice_init_prof_masks - initialize profile masks
2686 * @hw: pointer to the HW struct
2687 * @blk: hardware block
2689 static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk)
2694 mutex_init(&hw->blk[blk].masks.lock);
2696 per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs;
2698 hw->blk[blk].masks.count = per_pf;
2699 hw->blk[blk].masks.first = hw->pf_id * per_pf;
2701 memset(hw->blk[blk].masks.masks, 0, sizeof(hw->blk[blk].masks.masks));
2703 for (i = hw->blk[blk].masks.first;
2704 i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
2705 ice_write_prof_mask_reg(hw, blk, i, 0, 0);
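/* Illustration (editorial note, not part of the driver): the global pool
 * of profile masks is statically partitioned across PFs. Assuming, for
 * example, 16 total masks and 4 functions, each PF owns 4 contiguous
 * slots (PF0 gets 0..3, PF1 gets 4..7, and so on), and the loop above
 * clears only the slots belonging to this PF.
 */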
2709 * ice_init_all_prof_masks - initialize all profile masks
2710 * @hw: pointer to the HW struct
2712 static void ice_init_all_prof_masks(struct ice_hw *hw)
2714 ice_init_prof_masks(hw, ICE_BLK_RSS);
2715 ice_init_prof_masks(hw, ICE_BLK_FD);
2719 * ice_alloc_prof_mask - allocate profile mask
2720 * @hw: pointer to the HW struct
2721 * @blk: hardware block
2722 * @idx: index of FV which will use the mask
2723 * @mask: the 16-bit mask
2724 * @mask_idx: variable to receive the mask index
2726 static enum ice_status
2727 ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask,
2730 bool found_unused = false, found_copy = false;
2731 enum ice_status status = ICE_ERR_MAX_LIMIT;
2732 u16 unused_idx = 0, copy_idx = 0;
2735 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
2736 return ICE_ERR_PARAM;
2738 mutex_lock(&hw->blk[blk].masks.lock);
2740 for (i = hw->blk[blk].masks.first;
2741 i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
2742 if (hw->blk[blk].masks.masks[i].in_use) {
2743 /* if mask is in use and it exactly duplicates the
2744 * desired mask and index, then it can be reused
2746 if (hw->blk[blk].masks.masks[i].mask == mask &&
2747 hw->blk[blk].masks.masks[i].idx == idx) {
2753 /* save off unused index, but keep searching in case
2754 * there is an exact match later on
2756 if (!found_unused) {
2757 found_unused = true;
2764 else if (found_unused)
2767 goto err_ice_alloc_prof_mask;
2769 /* update mask for a new entry */
2771 hw->blk[blk].masks.masks[i].in_use = true;
2772 hw->blk[blk].masks.masks[i].mask = mask;
2773 hw->blk[blk].masks.masks[i].idx = idx;
2774 hw->blk[blk].masks.masks[i].ref = 0;
2775 ice_write_prof_mask_reg(hw, blk, i, idx, mask);
2778 hw->blk[blk].masks.masks[i].ref++;
2782 err_ice_alloc_prof_mask:
2783 mutex_unlock(&hw->blk[blk].masks.lock);
2789 * ice_free_prof_mask - free profile mask
2790 * @hw: pointer to the HW struct
2791 * @blk: hardware block
2792 * @mask_idx: index of mask
2794 static enum ice_status
2795 ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx)
2797 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
2798 return ICE_ERR_PARAM;
2800 if (!(mask_idx >= hw->blk[blk].masks.first &&
2801 mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count))
2802 return ICE_ERR_DOES_NOT_EXIST;
2804 mutex_lock(&hw->blk[blk].masks.lock);
2806 if (!hw->blk[blk].masks.masks[mask_idx].in_use)
2807 goto exit_ice_free_prof_mask;
2809 if (hw->blk[blk].masks.masks[mask_idx].ref > 1) {
2810 hw->blk[blk].masks.masks[mask_idx].ref--;
2811 goto exit_ice_free_prof_mask;
2815 hw->blk[blk].masks.masks[mask_idx].in_use = false;
2816 hw->blk[blk].masks.masks[mask_idx].mask = 0;
2817 hw->blk[blk].masks.masks[mask_idx].idx = 0;
2819 /* update mask as unused entry */
2820 ice_debug(hw, ICE_DBG_PKG, "Free mask, blk %d, mask %d\n", blk,
2822 ice_write_prof_mask_reg(hw, blk, mask_idx, 0, 0);
2824 exit_ice_free_prof_mask:
2825 mutex_unlock(&hw->blk[blk].masks.lock);
2831 * ice_free_prof_masks - free all profile masks for a profile
2832 * @hw: pointer to the HW struct
2833 * @blk: hardware block
2834 * @prof_id: profile ID
2836 static enum ice_status
2837 ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id)
2842 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
2843 return ICE_ERR_PARAM;
2845 mask_bm = hw->blk[blk].es.mask_ena[prof_id];
2846 for (i = 0; i < BITS_PER_BYTE * sizeof(mask_bm); i++)
2847 if (mask_bm & BIT(i))
2848 ice_free_prof_mask(hw, blk, i);
2854 * ice_shutdown_prof_masks - clear profile masks and release the mask lock
2855 * @hw: pointer to the HW struct
2856 * @blk: hardware block
2858 * This should be called before unloading the driver
2860 static void ice_shutdown_prof_masks(struct ice_hw *hw, enum ice_block blk)
2864 mutex_lock(&hw->blk[blk].masks.lock);
2866 for (i = hw->blk[blk].masks.first;
2867 i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++) {
2868 ice_write_prof_mask_reg(hw, blk, i, 0, 0);
2870 hw->blk[blk].masks.masks[i].in_use = false;
2871 hw->blk[blk].masks.masks[i].idx = 0;
2872 hw->blk[blk].masks.masks[i].mask = 0;
2875 mutex_unlock(&hw->blk[blk].masks.lock);
2876 mutex_destroy(&hw->blk[blk].masks.lock);
2880 * ice_shutdown_all_prof_masks - clear all profile masks and release mask locks
2881 * @hw: pointer to the HW struct
2883 * This should be called before unloading the driver
2885 static void ice_shutdown_all_prof_masks(struct ice_hw *hw)
2887 ice_shutdown_prof_masks(hw, ICE_BLK_RSS);
2888 ice_shutdown_prof_masks(hw, ICE_BLK_FD);
2892 * ice_update_prof_masking - set registers according to masking
2893 * @hw: pointer to the HW struct
2894 * @blk: hardware block
2895 * @prof_id: profile ID
2898 static enum ice_status
2899 ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
2907 /* Only support FD and RSS masking, otherwise nothing to be done */
2908 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
2911 for (i = 0; i < hw->blk[blk].es.fvw; i++)
2912 if (masks[i] && masks[i] != 0xFFFF) {
2913 if (!ice_alloc_prof_mask(hw, blk, i, masks[i], &idx)) {
2914 ena_mask |= BIT(idx);
2916 /* not enough bitmaps */
2923 /* free any bitmaps we have allocated */
2924 for (i = 0; i < BITS_PER_BYTE * sizeof(ena_mask); i++)
2925 if (ena_mask & BIT(i))
2926 ice_free_prof_mask(hw, blk, i);
2928 return ICE_ERR_OUT_OF_RANGE;
2931 /* enable the masks for this profile */
2932 ice_write_prof_mask_enable_res(hw, blk, prof_id, ena_mask);
2934 /* store enabled masks with profile so that they can be freed later */
2935 hw->blk[blk].es.mask_ena[prof_id] = ena_mask;
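/* Illustration (editorial note, not part of the driver): ena_mask ends up
 * with one bit set per mask slot allocated above. If, say, masks[1] and
 * masks[4] were the only non-trivial masks and they landed in slots 6 and
 * 7, ena_mask would be BIT(6) | BIT(7). Writing that value to the
 * profile's mask-enable register and caching it in es.mask_ena[] is what
 * lets ice_free_prof_masks() release exactly those slots later.
 */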
2941 * ice_write_es - write an extraction sequence to hardware
2942 * @hw: pointer to the HW struct
2943 * @blk: the block in which to write the extraction sequence
2944 * @prof_id: the profile ID to write
2945 * @fv: pointer to the extraction sequence to write - NULL to clear the extraction sequence
2948 ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
2949 struct ice_fv_word *fv)
2953 off = prof_id * hw->blk[blk].es.fvw;
2955 memset(&hw->blk[blk].es.t[off], 0,
2956 hw->blk[blk].es.fvw * sizeof(*fv));
2957 hw->blk[blk].es.written[prof_id] = false;
2959 memcpy(&hw->blk[blk].es.t[off], fv,
2960 hw->blk[blk].es.fvw * sizeof(*fv));
2965 * ice_prof_dec_ref - decrement reference count for profile
2966 * @hw: pointer to the HW struct
2967 * @blk: the block containing the profile ID
2968 * @prof_id: the profile ID for which to decrement the reference count
2970 static enum ice_status
2971 ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
2973 if (prof_id >= hw->blk[blk].es.count)
2974 return ICE_ERR_PARAM;
2976 if (hw->blk[blk].es.ref_count[prof_id] > 0) {
2977 if (!--hw->blk[blk].es.ref_count[prof_id]) {
2978 ice_write_es(hw, blk, prof_id, NULL);
2979 ice_free_prof_masks(hw, blk, prof_id);
2980 return ice_free_prof_id(hw, blk, prof_id);
2987 /* Block / table section IDs */
2988 static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
2992 ICE_SID_PROFID_TCAM_SW,
2993 ICE_SID_PROFID_REDIR_SW,
3000 ICE_SID_PROFID_TCAM_ACL,
3001 ICE_SID_PROFID_REDIR_ACL,
3008 ICE_SID_PROFID_TCAM_FD,
3009 ICE_SID_PROFID_REDIR_FD,
3016 ICE_SID_PROFID_TCAM_RSS,
3017 ICE_SID_PROFID_REDIR_RSS,
3024 ICE_SID_PROFID_TCAM_PE,
3025 ICE_SID_PROFID_REDIR_PE,
3031 * ice_init_sw_xlt1_db - init software XLT1 database from HW tables
3032 * @hw: pointer to the hardware structure
3033 * @blk: the HW block to initialize
3035 static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
3039 for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
3042 ptg = hw->blk[blk].xlt1.t[pt];
3043 if (ptg != ICE_DEFAULT_PTG) {
3044 ice_ptg_alloc_val(hw, blk, ptg);
3045 ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
3051 * ice_init_sw_xlt2_db - init software XLT2 database from HW tables
3052 * @hw: pointer to the hardware structure
3053 * @blk: the HW block to initialize
3055 static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
3059 for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
3062 vsig = hw->blk[blk].xlt2.t[vsi];
3064 ice_vsig_alloc_val(hw, blk, vsig);
3065 ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
3066 /* no changes at this time, since this has been
3067 * initialized from the original package
3069 hw->blk[blk].xlt2.vsis[vsi].changed = 0;
3075 * ice_init_sw_db - init software database from HW tables
3076 * @hw: pointer to the hardware structure
3078 static void ice_init_sw_db(struct ice_hw *hw)
3082 for (i = 0; i < ICE_BLK_COUNT; i++) {
3083 ice_init_sw_xlt1_db(hw, (enum ice_block)i);
3084 ice_init_sw_xlt2_db(hw, (enum ice_block)i);
3089 * ice_fill_tbl - Reads content of a single table type into database
3090 * @hw: pointer to the hardware structure
3091 * @block_id: Block ID of the table to copy
3092 * @sid: Section ID of the table to copy
3094 * Will attempt to read the entire content of a given table of a single block
3095 * into the driver database. We assume that the buffer will always
3096 * be as large as or larger than the data contained in the package. If
3097 * this condition is not met, there is most likely an error in the package
3098 * file.
3100 static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
3102 u32 dst_len, sect_len, offset = 0;
3103 struct ice_prof_redir_section *pr;
3104 struct ice_prof_id_section *pid;
3105 struct ice_xlt1_section *xlt1;
3106 struct ice_xlt2_section *xlt2;
3107 struct ice_sw_fv_section *es;
3108 struct ice_pkg_enum state;
3112 /* if the HW segment pointer is null, then the first iteration of
3113 * ice_pkg_enum_section() will fail. In this case the HW tables will
3114 * not be filled and we return success.
3117 ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n");
3121 memset(&state, 0, sizeof(state));
3123 sect = ice_pkg_enum_section(hw->seg, &state, sid);
3127 case ICE_SID_XLT1_SW:
3128 case ICE_SID_XLT1_FD:
3129 case ICE_SID_XLT1_RSS:
3130 case ICE_SID_XLT1_ACL:
3131 case ICE_SID_XLT1_PE:
3134 sect_len = le16_to_cpu(xlt1->count) *
3135 sizeof(*hw->blk[block_id].xlt1.t);
3136 dst = hw->blk[block_id].xlt1.t;
3137 dst_len = hw->blk[block_id].xlt1.count *
3138 sizeof(*hw->blk[block_id].xlt1.t);
3140 case ICE_SID_XLT2_SW:
3141 case ICE_SID_XLT2_FD:
3142 case ICE_SID_XLT2_RSS:
3143 case ICE_SID_XLT2_ACL:
3144 case ICE_SID_XLT2_PE:
3146 src = (__force u8 *)xlt2->value;
3147 sect_len = le16_to_cpu(xlt2->count) *
3148 sizeof(*hw->blk[block_id].xlt2.t);
3149 dst = (u8 *)hw->blk[block_id].xlt2.t;
3150 dst_len = hw->blk[block_id].xlt2.count *
3151 sizeof(*hw->blk[block_id].xlt2.t);
3153 case ICE_SID_PROFID_TCAM_SW:
3154 case ICE_SID_PROFID_TCAM_FD:
3155 case ICE_SID_PROFID_TCAM_RSS:
3156 case ICE_SID_PROFID_TCAM_ACL:
3157 case ICE_SID_PROFID_TCAM_PE:
3159 src = (u8 *)pid->entry;
3160 sect_len = le16_to_cpu(pid->count) *
3161 sizeof(*hw->blk[block_id].prof.t);
3162 dst = (u8 *)hw->blk[block_id].prof.t;
3163 dst_len = hw->blk[block_id].prof.count *
3164 sizeof(*hw->blk[block_id].prof.t);
3166 case ICE_SID_PROFID_REDIR_SW:
3167 case ICE_SID_PROFID_REDIR_FD:
3168 case ICE_SID_PROFID_REDIR_RSS:
3169 case ICE_SID_PROFID_REDIR_ACL:
3170 case ICE_SID_PROFID_REDIR_PE:
3172 src = pr->redir_value;
3173 sect_len = le16_to_cpu(pr->count) *
3174 sizeof(*hw->blk[block_id].prof_redir.t);
3175 dst = hw->blk[block_id].prof_redir.t;
3176 dst_len = hw->blk[block_id].prof_redir.count *
3177 sizeof(*hw->blk[block_id].prof_redir.t);
3179 case ICE_SID_FLD_VEC_SW:
3180 case ICE_SID_FLD_VEC_FD:
3181 case ICE_SID_FLD_VEC_RSS:
3182 case ICE_SID_FLD_VEC_ACL:
3183 case ICE_SID_FLD_VEC_PE:
3186 sect_len = (u32)(le16_to_cpu(es->count) *
3187 hw->blk[block_id].es.fvw) *
3188 sizeof(*hw->blk[block_id].es.t);
3189 dst = (u8 *)hw->blk[block_id].es.t;
3190 dst_len = (u32)(hw->blk[block_id].es.count *
3191 hw->blk[block_id].es.fvw) *
3192 sizeof(*hw->blk[block_id].es.t);
3198 /* if the section offset exceeds the destination length, terminate
3199 * the table fill
3201 if (offset > dst_len)
3204 /* if the sum of section size and offset exceeds the destination size,
3205 * then we are out of bounds of the HW table size for that PF.
3206 * Change the section length to fill only the remaining table space.
3209 if ((offset + sect_len) > dst_len)
3210 sect_len = dst_len - offset;
3212 memcpy(dst + offset, src, sect_len);
3214 sect = ice_pkg_enum_section(NULL, &state, sid);
3219 * ice_fill_blk_tbls - Read package context for tables
3220 * @hw: pointer to the hardware structure
3222 * Reads the current package contents and populates the driver
3223 * database with the data iteratively for all advanced feature
3224 * blocks. Assumes that the HW tables have been allocated.
3226 void ice_fill_blk_tbls(struct ice_hw *hw)
3230 for (i = 0; i < ICE_BLK_COUNT; i++) {
3231 enum ice_block blk_id = (enum ice_block)i;
3233 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid);
3234 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid);
3235 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid);
3236 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid);
3237 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid);
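/* Editorial note: each pass of the loop above walks the five per-block
 * tables in a fixed order: XLT1 (ptype to PTG), XLT2 (VSI to VSIG), the
 * profile ID TCAM, the profile redirection table, and the extraction
 * sequence (field vectors). For every table, ice_fill_tbl() copies each
 * package section carrying the matching section ID into the driver's
 * shadow of that HW table.
 */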
3244 * ice_free_prof_map - free profile map
3245 * @hw: pointer to the hardware structure
3246 * @blk_idx: HW block index
3248 static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx)
3250 struct ice_es *es = &hw->blk[blk_idx].es;
3251 struct ice_prof_map *del, *tmp;
3253 mutex_lock(&es->prof_map_lock);
3254 list_for_each_entry_safe(del, tmp, &es->prof_map, list) {
3255 list_del(&del->list);
3256 devm_kfree(ice_hw_to_dev(hw), del);
3258 INIT_LIST_HEAD(&es->prof_map);
3259 mutex_unlock(&es->prof_map_lock);
3263 * ice_free_flow_profs - free flow profile entries
3264 * @hw: pointer to the hardware structure
3265 * @blk_idx: HW block index
3267 static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
3269 struct ice_flow_prof *p, *tmp;
3271 mutex_lock(&hw->fl_profs_locks[blk_idx]);
3272 list_for_each_entry_safe(p, tmp, &hw->fl_profs[blk_idx], l_entry) {
3273 struct ice_flow_entry *e, *t;
3275 list_for_each_entry_safe(e, t, &p->entries, l_entry)
3276 ice_flow_rem_entry(hw, (enum ice_block)blk_idx,
3277 ICE_FLOW_ENTRY_HNDL(e));
3279 list_del(&p->l_entry);
3281 mutex_destroy(&p->entries_lock);
3282 devm_kfree(ice_hw_to_dev(hw), p);
3284 mutex_unlock(&hw->fl_profs_locks[blk_idx]);
3286 /* if the driver is in reset and tables are being cleared,
3287 * re-initialize the flow profile list heads
3289 INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
3293 * ice_free_vsig_tbl - free complete VSIG table entries
3294 * @hw: pointer to the hardware structure
3295 * @blk: the HW block on which to free the VSIG table entries
3297 static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk)
3301 if (!hw->blk[blk].xlt2.vsig_tbl)
3304 for (i = 1; i < ICE_MAX_VSIGS; i++)
3305 if (hw->blk[blk].xlt2.vsig_tbl[i].in_use)
3306 ice_vsig_free(hw, blk, i);
3310 * ice_free_hw_tbls - free hardware table memory
3311 * @hw: pointer to the hardware structure
3313 void ice_free_hw_tbls(struct ice_hw *hw)
3315 struct ice_rss_cfg *r, *rt;
3318 for (i = 0; i < ICE_BLK_COUNT; i++) {
3319 if (hw->blk[i].is_list_init) {
3320 struct ice_es *es = &hw->blk[i].es;
3322 ice_free_prof_map(hw, i);
3323 mutex_destroy(&es->prof_map_lock);
3325 ice_free_flow_profs(hw, i);
3326 mutex_destroy(&hw->fl_profs_locks[i]);
3328 hw->blk[i].is_list_init = false;
3330 ice_free_vsig_tbl(hw, (enum ice_block)i);
3331 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptypes);
3332 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptg_tbl);
3333 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.t);
3334 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.t);
3335 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsig_tbl);
3336 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsis);
3337 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof.t);
3338 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_redir.t);
3339 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t);
3340 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count);
3341 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written);
3342 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.mask_ena);
3345 list_for_each_entry_safe(r, rt, &hw->rss_list_head, l_entry) {
3346 list_del(&r->l_entry);
3347 devm_kfree(ice_hw_to_dev(hw), r);
3349 mutex_destroy(&hw->rss_locks);
3350 ice_shutdown_all_prof_masks(hw);
3351 memset(hw->blk, 0, sizeof(hw->blk));
3355 * ice_init_flow_profs - init flow profile locks and list heads
3356 * @hw: pointer to the hardware structure
3357 * @blk_idx: HW block index
3359 static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
3361 mutex_init(&hw->fl_profs_locks[blk_idx]);
3362 INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
3366 * ice_clear_hw_tbls - clear HW tables and flow profiles
3367 * @hw: pointer to the hardware structure
3369 void ice_clear_hw_tbls(struct ice_hw *hw)
3373 for (i = 0; i < ICE_BLK_COUNT; i++) {
3374 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
3375 struct ice_prof_tcam *prof = &hw->blk[i].prof;
3376 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
3377 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
3378 struct ice_es *es = &hw->blk[i].es;
3380 if (hw->blk[i].is_list_init) {
3381 ice_free_prof_map(hw, i);
3382 ice_free_flow_profs(hw, i);
3385 ice_free_vsig_tbl(hw, (enum ice_block)i);
3387 memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes));
3388 memset(xlt1->ptg_tbl, 0,
3389 ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl));
3390 memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t));
3392 memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis));
3393 memset(xlt2->vsig_tbl, 0,
3394 xlt2->count * sizeof(*xlt2->vsig_tbl));
3395 memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t));
3397 memset(prof->t, 0, prof->count * sizeof(*prof->t));
3398 memset(prof_redir->t, 0,
3399 prof_redir->count * sizeof(*prof_redir->t));
3401 memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw);
3402 memset(es->ref_count, 0, es->count * sizeof(*es->ref_count));
3403 memset(es->written, 0, es->count * sizeof(*es->written));
3404 memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena));
3409 * ice_init_hw_tbls - init hardware table memory
3410 * @hw: pointer to the hardware structure
3412 enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
3416 mutex_init(&hw->rss_locks);
3417 INIT_LIST_HEAD(&hw->rss_list_head);
3418 ice_init_all_prof_masks(hw);
3419 for (i = 0; i < ICE_BLK_COUNT; i++) {
3420 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
3421 struct ice_prof_tcam *prof = &hw->blk[i].prof;
3422 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
3423 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
3424 struct ice_es *es = &hw->blk[i].es;
3427 if (hw->blk[i].is_list_init)
3430 ice_init_flow_profs(hw, i);
3431 mutex_init(&es->prof_map_lock);
3432 INIT_LIST_HEAD(&es->prof_map);
3433 hw->blk[i].is_list_init = true;
3435 hw->blk[i].overwrite = blk_sizes[i].overwrite;
3436 es->reverse = blk_sizes[i].reverse;
3438 xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF];
3439 xlt1->count = blk_sizes[i].xlt1;
3441 xlt1->ptypes = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
3442 sizeof(*xlt1->ptypes), GFP_KERNEL);
3447 xlt1->ptg_tbl = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_PTGS,
3448 sizeof(*xlt1->ptg_tbl),
3454 xlt1->t = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
3455 sizeof(*xlt1->t), GFP_KERNEL);
3459 xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
3460 xlt2->count = blk_sizes[i].xlt2;
3462 xlt2->vsis = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
3463 sizeof(*xlt2->vsis), GFP_KERNEL);
3468 xlt2->vsig_tbl = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
3469 sizeof(*xlt2->vsig_tbl),
3471 if (!xlt2->vsig_tbl)
3474 for (j = 0; j < xlt2->count; j++)
3475 INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);
3477 xlt2->t = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
3478 sizeof(*xlt2->t), GFP_KERNEL);
3482 prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
3483 prof->count = blk_sizes[i].prof_tcam;
3484 prof->max_prof_id = blk_sizes[i].prof_id;
3485 prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
3486 prof->t = devm_kcalloc(ice_hw_to_dev(hw), prof->count,
3487 sizeof(*prof->t), GFP_KERNEL);
3492 prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
3493 prof_redir->count = blk_sizes[i].prof_redir;
3494 prof_redir->t = devm_kcalloc(ice_hw_to_dev(hw),
3496 sizeof(*prof_redir->t),
3502 es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
3503 es->count = blk_sizes[i].es;
3504 es->fvw = blk_sizes[i].fvw;
3505 es->t = devm_kcalloc(ice_hw_to_dev(hw),
3506 (u32)(es->count * es->fvw),
3507 sizeof(*es->t), GFP_KERNEL);
3511 es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count,
3512 sizeof(*es->ref_count),
3517 es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count,
3518 sizeof(*es->written), GFP_KERNEL);
3522 es->mask_ena = devm_kcalloc(ice_hw_to_dev(hw), es->count,
3523 sizeof(*es->mask_ena), GFP_KERNEL);
3530 ice_free_hw_tbls(hw);
3531 return ICE_ERR_NO_MEMORY;
3535 * ice_prof_gen_key - generate profile ID key
3536 * @hw: pointer to the HW struct
3537 * @blk: the block in which to write profile ID to
3538 * @ptg: packet type group (PTG) portion of key
3539 * @vsig: VSIG portion of key
3540 * @cdid: CDID portion of key
3541 * @flags: flag portion of key
3542 * @vl_msk: valid mask
3543 * @dc_msk: don't care mask
3544 * @nm_msk: never match mask
3545 * @key: output of profile ID key
3547 static enum ice_status
3548 ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
3549 u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
3550 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
3551 u8 key[ICE_TCAM_KEY_SZ])
3553 struct ice_prof_id_key inkey;
3556 inkey.xlt2_cdid = cpu_to_le16(vsig);
3557 inkey.flags = cpu_to_le16(flags);
3559 switch (hw->blk[blk].prof.cdid_bits) {
3563 #define ICE_CD_2_M 0xC000U
3564 #define ICE_CD_2_S 14
3565 inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_2_M);
3566 inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_2_S);
3569 #define ICE_CD_4_M 0xF000U
3570 #define ICE_CD_4_S 12
3571 inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_4_M);
3572 inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_4_S);
3575 #define ICE_CD_8_M 0xFF00U
3576 #define ICE_CD_8_S 8
3577 inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_8_M);
3578 inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_8_S);
3581 ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n");
3585 return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk,
3586 nm_msk, 0, ICE_TCAM_KEY_SZ / 2);
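/* Illustration (editorial note, not part of the driver): the xlt2_cdid
 * key field carries the VSIG in its low bits and a one-hot CDID value in
 * its high bits. With 4 CDID bits, for example, cdid = 1 clears the top
 * nibble (ICE_CD_4_M) and ORs in BIT(1) << ICE_CD_4_S, setting bit 13 of
 * the field while leaving the VSIG bits below untouched.
 */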
3590 * ice_tcam_write_entry - write TCAM entry
3591 * @hw: pointer to the HW struct
3592 * @blk: the block in which to write profile ID to
3593 * @idx: the entry index to write to
3594 * @prof_id: profile ID
3595 * @ptg: packet type group (PTG) portion of key
3596 * @vsig: VSIG portion of key
3597 * @cdid: CDID portion of key
3598 * @flags: flag portion of key
3599 * @vl_msk: valid mask
3600 * @dc_msk: don't care mask
3601 * @nm_msk: never match mask
3603 static enum ice_status
3604 ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
3605 u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
3606 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
3607 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ],
3608 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
3611 enum ice_status status;
3613 status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
3614 dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
3616 hw->blk[blk].prof.t[idx].addr = cpu_to_le16(idx);
3617 hw->blk[blk].prof.t[idx].prof_id = prof_id;
3624 * ice_vsig_get_ref - return the number of VSIs belonging to a VSIG
3625 * @hw: pointer to the hardware structure
3627 * @vsig: VSIG to query
3628 * @refs: pointer to variable to receive the reference count
3630 static enum ice_status
3631 ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
3633 u16 idx = vsig & ICE_VSIG_IDX_M;
3634 struct ice_vsig_vsi *ptr;
3638 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
3639 return ICE_ERR_DOES_NOT_EXIST;
3641 ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
3644 ptr = ptr->next_vsi;
3651 * ice_has_prof_vsig - check to see if VSIG has a specific profile
3652 * @hw: pointer to the hardware structure
3654 * @vsig: VSIG to check against
3655 * @hdl: profile handle
3658 ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
3660 u16 idx = vsig & ICE_VSIG_IDX_M;
3661 struct ice_vsig_prof *ent;
3663 list_for_each_entry(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3665 if (ent->profile_cookie == hdl)
3668 ice_debug(hw, ICE_DBG_INIT, "Characteristic list for VSI group %d not found.\n",
3674 * ice_prof_bld_es - build profile ID extraction sequence changes
3675 * @hw: pointer to the HW struct
3676 * @blk: hardware block
3677 * @bld: the update package buffer build to add to
3678 * @chgs: the list of changes to make in hardware
3680 static enum ice_status
3681 ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
3682 struct ice_buf_build *bld, struct list_head *chgs)
3684 u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
3685 struct ice_chs_chg *tmp;
3687 list_for_each_entry(tmp, chgs, list_entry)
3688 if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) {
3689 u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
3690 struct ice_pkg_es *p;
3693 id = ice_sect_id(blk, ICE_VEC_TBL);
3694 p = ice_pkg_buf_alloc_section(bld, id,
3695 struct_size(p, es, 1) +
3700 return ICE_ERR_MAX_LIMIT;
3702 p->count = cpu_to_le16(1);
3703 p->offset = cpu_to_le16(tmp->prof_id);
3705 memcpy(p->es, &hw->blk[blk].es.t[off], vec_size);
3712 * ice_prof_bld_tcam - build profile ID TCAM changes
3713 * @hw: pointer to the HW struct
3714 * @blk: hardware block
3715 * @bld: the update package buffer build to add to
3716 * @chgs: the list of changes to make in hardware
3718 static enum ice_status
3719 ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
3720 struct ice_buf_build *bld, struct list_head *chgs)
3722 struct ice_chs_chg *tmp;
3724 list_for_each_entry(tmp, chgs, list_entry)
3725 if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) {
3726 struct ice_prof_id_section *p;
3729 id = ice_sect_id(blk, ICE_PROF_TCAM);
3730 p = ice_pkg_buf_alloc_section(bld, id,
3731 struct_size(p, entry, 1));
3734 return ICE_ERR_MAX_LIMIT;
3736 p->count = cpu_to_le16(1);
3737 p->entry[0].addr = cpu_to_le16(tmp->tcam_idx);
3738 p->entry[0].prof_id = tmp->prof_id;
3740 memcpy(p->entry[0].key,
3741 &hw->blk[blk].prof.t[tmp->tcam_idx].key,
3742 sizeof(hw->blk[blk].prof.t->key));
3749 * ice_prof_bld_xlt1 - build XLT1 changes
3750 * @blk: hardware block
3751 * @bld: the update package buffer build to add to
3752 * @chgs: the list of changes to make in hardware
3754 static enum ice_status
3755 ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
3756 struct list_head *chgs)
3758 struct ice_chs_chg *tmp;
3760 list_for_each_entry(tmp, chgs, list_entry)
3761 if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) {
3762 struct ice_xlt1_section *p;
3765 id = ice_sect_id(blk, ICE_XLT1);
3766 p = ice_pkg_buf_alloc_section(bld, id,
3767 struct_size(p, value, 1));
3770 return ICE_ERR_MAX_LIMIT;
3772 p->count = cpu_to_le16(1);
3773 p->offset = cpu_to_le16(tmp->ptype);
3774 p->value[0] = tmp->ptg;
3781 * ice_prof_bld_xlt2 - build XLT2 changes
3782 * @blk: hardware block
3783 * @bld: the update package buffer build to add to
3784 * @chgs: the list of changes to make in hardware
3786 static enum ice_status
3787 ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
3788 struct list_head *chgs)
3790 struct ice_chs_chg *tmp;
3792 list_for_each_entry(tmp, chgs, list_entry) {
3793 struct ice_xlt2_section *p;
3796 switch (tmp->type) {
3800 id = ice_sect_id(blk, ICE_XLT2);
3801 p = ice_pkg_buf_alloc_section(bld, id,
3802 struct_size(p, value, 1));
3805 return ICE_ERR_MAX_LIMIT;
3807 p->count = cpu_to_le16(1);
3808 p->offset = cpu_to_le16(tmp->vsi);
3809 p->value[0] = cpu_to_le16(tmp->vsig);
3820 * ice_upd_prof_hw - update hardware using the change list
3821 * @hw: pointer to the HW struct
3822 * @blk: hardware block
3823 * @chgs: the list of changes to make in hardware
3825 static enum ice_status
3826 ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
3827 struct list_head *chgs)
3829 struct ice_buf_build *b;
3830 struct ice_chs_chg *tmp;
3831 enum ice_status status;
3839 /* count number of sections we need */
3840 list_for_each_entry(tmp, chgs, list_entry) {
3841 switch (tmp->type) {
3842 case ICE_PTG_ES_ADD:
3860 sects = xlt1 + xlt2 + tcam + es;
3865 /* Build update package buffer */
3866 b = ice_pkg_buf_alloc(hw);
3868 return ICE_ERR_NO_MEMORY;
3870 status = ice_pkg_buf_reserve_section(b, sects);
3874 /* Preserve order of table update: ES, TCAM, PTG, VSIG */
3876 status = ice_prof_bld_es(hw, blk, b, chgs);
3882 status = ice_prof_bld_tcam(hw, blk, b, chgs);
3888 status = ice_prof_bld_xlt1(blk, b, chgs);
3894 status = ice_prof_bld_xlt2(blk, b, chgs);
3899 /* After the package buffer build, check that the section count in the
3900 * buffer is non-zero and matches the number of sections detected for the
3901 * package update.
3903 pkg_sects = ice_pkg_buf_get_active_sections(b);
3904 if (!pkg_sects || pkg_sects != sects) {
3905 status = ICE_ERR_INVAL_SIZE;
3909 /* update package */
3910 status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
3911 if (status == ICE_ERR_AQ_ERROR)
3912 ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");
3915 ice_pkg_buf_free(hw, b);
3920 * ice_update_fd_mask - set Flow Director Field Vector mask for a profile
3921 * @hw: pointer to the HW struct
3922 * @prof_id: profile ID
3923 * @mask_sel: mask select
3925 * This function enables any of the masks selected by the mask select parameter
3926 * for the profile specified.
3928 static void ice_update_fd_mask(struct ice_hw *hw, u16 prof_id, u32 mask_sel)
3930 wr32(hw, GLQF_FDMASK_SEL(prof_id), mask_sel);
3932 ice_debug(hw, ICE_DBG_INIT, "fd mask(%d): %x = %x\n", prof_id,
3933 GLQF_FDMASK_SEL(prof_id), mask_sel);
3936 struct ice_fd_src_dst_pair {
3942 static const struct ice_fd_src_dst_pair ice_fd_pairs[] = {
3943 /* These are defined in pairs */
3944 { ICE_PROT_IPV4_OF_OR_S, 2, 12 },
3945 { ICE_PROT_IPV4_OF_OR_S, 2, 16 },
3947 { ICE_PROT_IPV4_IL, 2, 12 },
3948 { ICE_PROT_IPV4_IL, 2, 16 },
3950 { ICE_PROT_IPV6_OF_OR_S, 8, 8 },
3951 { ICE_PROT_IPV6_OF_OR_S, 8, 24 },
3953 { ICE_PROT_IPV6_IL, 8, 8 },
3954 { ICE_PROT_IPV6_IL, 8, 24 },
3956 { ICE_PROT_TCP_IL, 1, 0 },
3957 { ICE_PROT_TCP_IL, 1, 2 },
3959 { ICE_PROT_UDP_OF, 1, 0 },
3960 { ICE_PROT_UDP_OF, 1, 2 },
3962 { ICE_PROT_UDP_IL_OR_S, 1, 0 },
3963 { ICE_PROT_UDP_IL_OR_S, 1, 2 },
3965 { ICE_PROT_SCTP_IL, 1, 0 },
3966 { ICE_PROT_SCTP_IL, 1, 2 }
3969 #define ICE_FD_SRC_DST_PAIR_COUNT ARRAY_SIZE(ice_fd_pairs)
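/* Editorial note: each ice_fd_pairs[] entry above is assumed to describe
 * one half of a src/dst pair as { protocol ID, FV word count, byte
 * offset }. Read that way, the two ICE_PROT_IPV4_OF_OR_S rows cover the
 * 2-word IPv4 source address at offset 12 and destination address at
 * offset 16, while the TCP/UDP/SCTP rows cover the 1-word source port at
 * offset 0 and destination port at offset 2.
 */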
3972 * ice_update_fd_swap - set registers appropriately for a FD FV extraction sequence
3973 * @hw: pointer to the HW struct
3974 * @prof_id: profile ID
3975 * @es: extraction sequence (length of array is determined by the block)
3977 static enum ice_status
3978 ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
3980 DECLARE_BITMAP(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
3981 u8 pair_start[ICE_FD_SRC_DST_PAIR_COUNT] = { 0 };
3982 #define ICE_FD_FV_NOT_FOUND (-2)
3983 s8 first_free = ICE_FD_FV_NOT_FOUND;
3984 u8 used[ICE_MAX_FV_WORDS] = { 0 };
3989 bitmap_zero(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
3991 /* This code assumes that the Flow Director field vectors are assigned
3992 * from the end of the FV indexes working towards the zero index, that
3993 * only complete fields will be included and will be consecutive, and
3994 * that there are no gaps between valid indexes.
3997 /* Determine swap fields present */
3998 for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw; i++) {
3999 /* Find the first free entry, assuming right to left population.
4000 * This is where we can start adding additional pairs if needed.
4002 if (first_free == ICE_FD_FV_NOT_FOUND && es[i].prot_id !=
4006 for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
4007 if (es[i].prot_id == ice_fd_pairs[j].prot_id &&
4008 es[i].off == ice_fd_pairs[j].off) {
4009 set_bit(j, pair_list);
4014 orig_free = first_free;
4016 /* determine missing swap fields that need to be added */
4017 for (i = 0; i < ICE_FD_SRC_DST_PAIR_COUNT; i += 2) {
4018 u8 bit1 = test_bit(i + 1, pair_list);
4019 u8 bit0 = test_bit(i, pair_list);
4024 /* add the appropriate 'paired' entry */
4030 /* check for room */
4031 if (first_free + 1 < (s8)ice_fd_pairs[index].count)
4032 return ICE_ERR_MAX_LIMIT;
4034 /* place in extraction sequence */
4035 for (k = 0; k < ice_fd_pairs[index].count; k++) {
4036 es[first_free - k].prot_id =
4037 ice_fd_pairs[index].prot_id;
4038 es[first_free - k].off =
4039 ice_fd_pairs[index].off + (k * 2);
4042 return ICE_ERR_OUT_OF_RANGE;
4044 /* keep track of non-relevant fields */
4045 mask_sel |= BIT(first_free - k);
4048 pair_start[index] = first_free;
4049 first_free -= ice_fd_pairs[index].count;
4053 /* fill in the swap array */
4054 si = hw->blk[ICE_BLK_FD].es.fvw - 1;
4056 u8 indexes_used = 1;
4058 /* assume flat at this index */
4059 #define ICE_SWAP_VALID 0x80
4060 used[si] = si | ICE_SWAP_VALID;
4062 if (orig_free == ICE_FD_FV_NOT_FOUND || si <= orig_free) {
4067 /* check for a swap location */
4068 for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
4069 if (es[si].prot_id == ice_fd_pairs[j].prot_id &&
4070 es[si].off == ice_fd_pairs[j].off) {
4073 /* determine the appropriate matching field */
4074 idx = j + ((j % 2) ? -1 : 1);
4076 indexes_used = ice_fd_pairs[idx].count;
4077 for (k = 0; k < indexes_used; k++) {
4078 used[si - k] = (pair_start[idx] - k) |
4088 /* for each set of 4 swap and 4 inset indexes, write the appropriate
4089 * registers
4091 for (j = 0; j < hw->blk[ICE_BLK_FD].es.fvw / 4; j++) {
4095 for (k = 0; k < 4; k++) {
4099 if (used[idx] && !(mask_sel & BIT(idx))) {
4100 raw_swap |= used[idx] << (k * BITS_PER_BYTE);
4101 #define ICE_INSET_DFLT 0x9f
4102 raw_in |= ICE_INSET_DFLT << (k * BITS_PER_BYTE);
4106 /* write the appropriate swap register set */
4107 wr32(hw, GLQF_FDSWAP(prof_id, j), raw_swap);
4109 ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %08x\n",
4110 prof_id, j, GLQF_FDSWAP(prof_id, j), raw_swap);
4112 /* write the appropriate inset register set */
4113 wr32(hw, GLQF_FDINSET(prof_id, j), raw_in);
4115 ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): %x = %08x\n",
4116 prof_id, j, GLQF_FDINSET(prof_id, j), raw_in);
4119 /* initially clear the mask select for this profile */
4120 ice_update_fd_mask(hw, prof_id, 0);
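/* Illustration (editorial note, not part of the driver): each used[]
 * entry is an FV index with ICE_SWAP_VALID (0x80) ORed in, so an
 * unswapped word at index 5 is recorded as 0x85, while a word that is
 * part of a src/dst pair records its partner's index instead. The write
 * loop above then packs four such bytes into each GLQF_FDSWAP() register,
 * with the matching GLQF_FDINSET() bytes set to the default inset value
 * ICE_INSET_DFLT for every valid, unmasked index.
 */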
4125 /* The entries here need to match the order of enum ice_ptype_attrib */
4126 static const struct ice_ptype_attrib_info ice_ptype_attributes[] = {
4127 { ICE_GTP_PDU_EH, ICE_GTP_PDU_FLAG_MASK },
4128 { ICE_GTP_SESSION, ICE_GTP_FLAGS_MASK },
4129 { ICE_GTP_DOWNLINK, ICE_GTP_FLAGS_MASK },
4130 { ICE_GTP_UPLINK, ICE_GTP_FLAGS_MASK },
4134 * ice_get_ptype_attrib_info - get PTYPE attribute information
4135 * @type: attribute type
4136 * @info: pointer to variable to receive the attribute information
4139 ice_get_ptype_attrib_info(enum ice_ptype_attrib_type type,
4140 struct ice_ptype_attrib_info *info)
4142 *info = ice_ptype_attributes[type];
4146 * ice_add_prof_attrib - add any PTG with attributes to profile
4147 * @prof: pointer to the profile to which PTG entries will be added
4148 * @ptg: PTG to be added
4149 * @ptype: PTYPE that needs to be looked up
4150 * @attr: array of attributes that will be considered
4151 * @attr_cnt: number of elements in the attribute array
4153 static enum ice_status
4154 ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
4155 const struct ice_ptype_attributes *attr, u16 attr_cnt)
4160 for (i = 0; i < attr_cnt; i++)
4161 if (attr[i].ptype == ptype) {
4164 prof->ptg[prof->ptg_cnt] = ptg;
4165 ice_get_ptype_attrib_info(attr[i].attrib,
4166 &prof->attr[prof->ptg_cnt]);
4168 if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
4169 return ICE_ERR_MAX_LIMIT;
4173 return ICE_ERR_DOES_NOT_EXIST;
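/* Editorial note: the attribute lookup above lets several ptypes that
 * share one PTG carry distinct metadata. The GTP entries in
 * ice_ptype_attributes[], for instance, distinguish PDU, session, uplink,
 * and downlink packets by flag/mask pairs, with each matching
 * PTG/attribute combination consuming one prof->ptg[] slot.
 */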
4179 * ice_add_prof - add profile
4180 * @hw: pointer to the HW struct
4181 * @blk: hardware block
4182 * @id: profile tracking ID
4183 * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
4184 * @attr: array of attributes
4185 * @attr_cnt: number of elements in attr array
4186 * @es: extraction sequence (length of array is determined by the block)
4187 * @masks: mask for extraction sequence
4189 * This function registers a profile, which matches a set of PTYPES with a
4190 * particular extraction sequence. While the hardware profile is allocated
4191 * it will not be written until the first call to ice_add_flow that specifies
4192 * the ID value used here.
4195 ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
4196 const struct ice_ptype_attributes *attr, u16 attr_cnt,
4197 struct ice_fv_word *es, u16 *masks)
4199 u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
4200 DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
4201 struct ice_prof_map *prof;
4202 enum ice_status status;
4206 bitmap_zero(ptgs_used, ICE_XLT1_CNT);
4208 mutex_lock(&hw->blk[blk].es.prof_map_lock);
4210 /* search for existing profile */
4211 status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id);
4213 /* allocate profile ID */
4214 status = ice_alloc_prof_id(hw, blk, &prof_id);
4216 goto err_ice_add_prof;
4217 if (blk == ICE_BLK_FD) {
4218 /* For Flow Director block, the extraction sequence may
4219 * need to be altered in the case where there are paired
4220 * fields that have no match. This is necessary because
4221 * for Flow Director, src and dest fields need to be paired
4222 * for filter programming and these values are swapped
4225 status = ice_update_fd_swap(hw, prof_id, es);
4227 goto err_ice_add_prof;
4229 status = ice_update_prof_masking(hw, blk, prof_id, masks);
4231 goto err_ice_add_prof;
4233 /* and write new es */
4234 ice_write_es(hw, blk, prof_id, es);
4237 ice_prof_inc_ref(hw, blk, prof_id);
4239 /* add profile info */
4240 prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*prof), GFP_KERNEL);
4242 status = ICE_ERR_NO_MEMORY;
4243 goto err_ice_add_prof;
4246 prof->profile_cookie = id;
4247 prof->prof_id = prof_id;
4251 /* build list of PTGs */
4252 while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
4255 if (!ptypes[byte]) {
4261 /* Examine 8 bits per byte */
4262 for_each_set_bit(bit, (unsigned long *)&ptypes[byte],
4267 ptype = byte * BITS_PER_BYTE + bit;
4269 /* The package should place all ptypes in a non-zero
4270 * PTG, so the following call should never fail.
4272 if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
4275 /* If PTG is already added, skip and continue */
4276 if (test_bit(ptg, ptgs_used))
4279 set_bit(ptg, ptgs_used);
4280 /* Check to see if there are any attributes for
4281 * this PTYPE, and add them if found.
4283 status = ice_add_prof_attrib(prof, ptg, ptype,
4285 if (status == ICE_ERR_MAX_LIMIT)
4288 /* This is simply a PTYPE/PTG with no
4289 * attribute
4291 prof->ptg[prof->ptg_cnt] = ptg;
4292 prof->attr[prof->ptg_cnt].flags = 0;
4293 prof->attr[prof->ptg_cnt].mask = 0;
4295 if (++prof->ptg_cnt >=
4296 ICE_MAX_PTG_PER_PROFILE)
4305 list_add(&prof->list, &hw->blk[blk].es.prof_map);
4309 mutex_unlock(&hw->blk[blk].es.prof_map_lock);
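/* Usage sketch (editorial note, not part of the driver): a caller
 * registering a profile builds a ptype bitmap and supplies its extraction
 * sequence and per-word masks. The names below are illustrative only:
 *
 *	DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX) = { 0 };
 *
 *	set_bit(MY_PTYPE, ptypes);	// hypothetical ptype
 *	status = ice_add_prof(hw, ICE_BLK_RSS, my_cookie, (u8 *)ptypes,
 *			      NULL, 0, my_es, my_masks);
 *
 * The hardware profile is only written out later, on the first flow add
 * that references my_cookie.
 */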
4314 * ice_search_prof_id - Search for a profile tracking ID
4315 * @hw: pointer to the HW struct
4316 * @blk: hardware block
4317 * @id: profile tracking ID
4319 * This will search for a profile tracking ID which was previously added.
4320 * The profile map lock should be held before calling this function.
4322 static struct ice_prof_map *
4323 ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
4325 struct ice_prof_map *entry = NULL;
4326 struct ice_prof_map *map;
4328 list_for_each_entry(map, &hw->blk[blk].es.prof_map, list)
4329 if (map->profile_cookie == id) {
4338 * ice_vsig_prof_id_count - count profiles in a VSIG
4339 * @hw: pointer to the HW struct
4340 * @blk: hardware block
4341 * @vsig: VSIG whose profiles are to be counted
4344 ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
4346 u16 idx = vsig & ICE_VSIG_IDX_M, count = 0;
4347 struct ice_vsig_prof *p;
4349 list_for_each_entry(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4357 * ice_rel_tcam_idx - release a TCAM index
4358 * @hw: pointer to the HW struct
4359 * @blk: hardware block
4360 * @idx: the index to release
4362 static enum ice_status
4363 ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
4365 /* Masks to invoke a never match entry */
4366 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
4367 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
4368 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
4369 enum ice_status status;
4371 /* write the TCAM entry */
4372 status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
4377 /* release the TCAM entry */
4378 status = ice_free_tcam_ent(hw, blk, idx);
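/* Editorial note: the mask triple above is what makes the entry inert.
 * vl_msk marks every key bit valid, dc_msk exempts bit 0 of the first
 * byte from don't-care, and nm_msk flags that same bit as never-match,
 * so the rewritten TCAM row can never hit before its index is handed
 * back to the allocator.
 */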
4384 * ice_rem_prof_id - remove one profile from a VSIG
4385 * @hw: pointer to the HW struct
4386 * @blk: hardware block
4387 * @prof: pointer to profile structure to remove
4389 static enum ice_status
4390 ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
4391 struct ice_vsig_prof *prof)
4393 enum ice_status status;
4396 for (i = 0; i < prof->tcam_count; i++)
4397 if (prof->tcam[i].in_use) {
4398 prof->tcam[i].in_use = false;
4399 status = ice_rel_tcam_idx(hw, blk,
4400 prof->tcam[i].tcam_idx);
4402 return ICE_ERR_HW_TABLE;
4409 * ice_rem_vsig - remove VSIG
4410 * @hw: pointer to the HW struct
4411 * @blk: hardware block
4412 * @vsig: the VSIG to remove
4413 * @chg: the change list
4415 static enum ice_status
4416 ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
4417 struct list_head *chg)
4419 u16 idx = vsig & ICE_VSIG_IDX_M;
4420 struct ice_vsig_vsi *vsi_cur;
4421 struct ice_vsig_prof *d, *t;
4422 enum ice_status status;
4424 /* remove TCAM entries */
4425 list_for_each_entry_safe(d, t,
4426 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4428 status = ice_rem_prof_id(hw, blk, d);
4433 devm_kfree(ice_hw_to_dev(hw), d);
4436 /* Move all VSIs associated with this VSIG to the default VSIG */
4437 vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
4438 /* If the VSIG has at least 1 VSI then iterate through the list
4439 * and remove the VSIs before deleting the group.
4443 struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
4444 struct ice_chs_chg *p;
4446 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
4449 return ICE_ERR_NO_MEMORY;
4451 p->type = ICE_VSIG_REM;
4452 p->orig_vsig = vsig;
4453 p->vsig = ICE_DEFAULT_VSIG;
4454 p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;
4456 list_add(&p->list_entry, chg);
4461 return ice_vsig_free(hw, blk, vsig);
4465 * ice_rem_prof_id_vsig - remove a specific profile from a VSIG
4466 * @hw: pointer to the HW struct
4467 * @blk: hardware block
4468 * @vsig: VSIG to remove the profile from
4469 * @hdl: profile handle indicating which profile to remove
4470 * @chg: list to receive a record of changes
4472 static enum ice_status
4473 ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
4474 struct list_head *chg)
4476 u16 idx = vsig & ICE_VSIG_IDX_M;
4477 struct ice_vsig_prof *p, *t;
4478 enum ice_status status;
4480 list_for_each_entry_safe(p, t,
4481 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4483 if (p->profile_cookie == hdl) {
4484 if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
4485 /* this is the last profile, remove the VSIG */
4486 return ice_rem_vsig(hw, blk, vsig, chg);
4488 status = ice_rem_prof_id(hw, blk, p);
4491 devm_kfree(ice_hw_to_dev(hw), p);
4496 return ICE_ERR_DOES_NOT_EXIST;
4500 * ice_rem_flow_all - remove all flows with a particular profile
4501 * @hw: pointer to the HW struct
4502 * @blk: hardware block
4503 * @id: profile tracking ID
4505 static enum ice_status
4506 ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
4508 struct ice_chs_chg *del, *tmp;
4509 enum ice_status status;
4510 struct list_head chg;
4513 INIT_LIST_HEAD(&chg);
4515 for (i = 1; i < ICE_MAX_VSIGS; i++)
4516 if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
4517 if (ice_has_prof_vsig(hw, blk, i, id)) {
4518 status = ice_rem_prof_id_vsig(hw, blk, i, id,
4521 goto err_ice_rem_flow_all;
4525 status = ice_upd_prof_hw(hw, blk, &chg);
4527 err_ice_rem_flow_all:
4528 list_for_each_entry_safe(del, tmp, &chg, list_entry) {
4529 list_del(&del->list_entry);
4530 devm_kfree(ice_hw_to_dev(hw), del);
/**
 * ice_rem_prof - remove profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @id: profile tracking ID
 *
 * This will remove the profile specified by the ID parameter, which was
 * previously created through ice_add_prof. If any existing entries
 * are associated with this profile, they will be removed as well.
 */
enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
{
	struct ice_prof_map *pmap;
	enum ice_status status;

	mutex_lock(&hw->blk[blk].es.prof_map_lock);

	pmap = ice_search_prof_id(hw, blk, id);
	if (!pmap) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto err_ice_rem_prof;
	}

	/* remove all flows with this profile */
	status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
	if (status)
		goto err_ice_rem_prof;

	/* dereference profile, and possibly remove */
	ice_prof_dec_ref(hw, blk, pmap->prof_id);

	list_del(&pmap->list);
	devm_kfree(ice_hw_to_dev(hw), pmap);

err_ice_rem_prof:
	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
	return status;
}

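/* Usage sketch for the teardown entry point above (illustrative only): the
 * caller passes the same tracking ID it gave ice_add_prof(); any flows and
 * VSIGs referencing only this profile are removed first via
 * ice_rem_flow_all():
 *
 *	status = ice_rem_prof(hw, ICE_BLK_RSS, id);
 *	if (status)
 *		return status;
 */
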
/**
 * ice_get_prof - get profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @hdl: profile handle
 * @chg: change list
 */
static enum ice_status
ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
	     struct list_head *chg)
{
	enum ice_status status = 0;
	struct ice_prof_map *map;
	struct ice_chs_chg *p;
	u16 i;

	mutex_lock(&hw->blk[blk].es.prof_map_lock);
	/* Get the details on the profile specified by the handle ID */
	map = ice_search_prof_id(hw, blk, hdl);
	if (!map) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto err_ice_get_prof;
	}

	for (i = 0; i < map->ptg_cnt; i++)
		if (!hw->blk[blk].es.written[map->prof_id]) {
			/* add ES to change list */
			p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
					 GFP_KERNEL);
			if (!p) {
				status = ICE_ERR_NO_MEMORY;
				goto err_ice_get_prof;
			}

			p->type = ICE_PTG_ES_ADD;
			p->ptype = 0;
			p->ptg = map->ptg[i];
			p->add_ptg = 0;

			p->add_prof = 1;
			p->prof_id = map->prof_id;

			hw->blk[blk].es.written[map->prof_id] = true;

			list_add(&p->list_entry, chg);
		}

err_ice_get_prof:
	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
	/* let caller clean up the change list */
	return status;
}

/**
 * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: VSIG from which to copy the list
 * @lst: output list
 *
 * This routine makes a copy of the list of profiles in the specified VSIG.
 */
static enum ice_status
ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
		   struct list_head *lst)
{
	struct ice_vsig_prof *ent1, *ent2;
	u16 idx = vsig & ICE_VSIG_IDX_M;

	list_for_each_entry(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
			    list) {
		struct ice_vsig_prof *p;

		/* copy to the input list */
		p = devm_kmemdup(ice_hw_to_dev(hw), ent1, sizeof(*p),
				 GFP_KERNEL);
		if (!p)
			goto err_ice_get_profs_vsig;

		list_add_tail(&p->list, lst);
	}

	return 0;

err_ice_get_profs_vsig:
	list_for_each_entry_safe(ent1, ent2, lst, list) {
		list_del(&ent1->list);
		devm_kfree(ice_hw_to_dev(hw), ent1);
	}

	return ICE_ERR_NO_MEMORY;
}

/**
 * ice_add_prof_to_lst - add profile entry to a list
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @lst: the list to be added to
 * @hdl: profile handle of entry to add
 */
static enum ice_status
ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
		    struct list_head *lst, u64 hdl)
{
	enum ice_status status = 0;
	struct ice_prof_map *map;
	struct ice_vsig_prof *p;
	u16 i;

	mutex_lock(&hw->blk[blk].es.prof_map_lock);
	map = ice_search_prof_id(hw, blk, hdl);
	if (!map) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto err_ice_add_prof_to_lst;
	}

	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
	if (!p) {
		status = ICE_ERR_NO_MEMORY;
		goto err_ice_add_prof_to_lst;
	}

	p->profile_cookie = map->profile_cookie;
	p->prof_id = map->prof_id;
	p->tcam_count = map->ptg_cnt;

	for (i = 0; i < map->ptg_cnt; i++) {
		p->tcam[i].prof_id = map->prof_id;
		p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
		p->tcam[i].ptg = map->ptg[i];
	}

	list_add(&p->list, lst);

err_ice_add_prof_to_lst:
	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
	return status;
}

/**
 * ice_move_vsi - move VSI to another VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the VSI to move
 * @vsig: the VSIG to move the VSI to
 * @chg: the change list
 */
static enum ice_status
ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
	     struct list_head *chg)
{
	enum ice_status status;
	struct ice_chs_chg *p;
	u16 orig_vsig;

	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
	if (!p)
		return ICE_ERR_NO_MEMORY;

	status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
	if (!status)
		status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);

	if (status) {
		devm_kfree(ice_hw_to_dev(hw), p);
		return status;
	}

	p->type = ICE_VSI_MOVE;
	p->vsi = vsi;
	p->orig_vsig = orig_vsig;
	p->vsig = vsig;

	list_add(&p->list_entry, chg);

	return 0;
}

/**
 * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list
 * @hw: pointer to the HW struct
 * @idx: the index of the TCAM entry to remove
 * @chg: the list of change structures to search
 */
static void
ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct list_head *chg)
{
	struct ice_chs_chg *pos, *tmp;

	list_for_each_entry_safe(tmp, pos, chg, list_entry)
		if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) {
			list_del(&tmp->list_entry);
			devm_kfree(ice_hw_to_dev(hw), tmp);
		}
}

/**
 * ice_prof_tcam_ena_dis - add enable or disable TCAM change
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @enable: true to enable, false to disable
 * @vsig: the VSIG of the TCAM entry
 * @tcam: pointer to the TCAM info structure of the entry to enable or disable
 * @chg: the change list
 *
 * This function appends an enable or disable TCAM change to the change log.
 */
static enum ice_status
ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
		      u16 vsig, struct ice_tcam_inf *tcam,
		      struct list_head *chg)
{
	enum ice_status status;
	struct ice_chs_chg *p;

	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* if disabling, free the TCAM */
	if (!enable) {
		status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx);

		/* if we have already created a change for this TCAM entry,
		 * then we need to remove that entry, in order to prevent
		 * writing to a TCAM entry we no longer will have ownership of.
		 */
		ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg);
		tcam->tcam_idx = 0;
		tcam->in_use = 0;
		return status;
	}

	/* for re-enabling, reallocate a TCAM */
	/* for entries with empty attribute masks, allocate entry from
	 * the bottom of the TCAM table; otherwise, allocate from the
	 * top of the table in order to give it higher priority
	 */
	status = ice_alloc_tcam_ent(hw, blk, tcam->attr.mask == 0,
				    &tcam->tcam_idx);
	if (status)
		return status;

	/* add TCAM to change list */
	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
	if (!p)
		return ICE_ERR_NO_MEMORY;

	status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
				      tcam->ptg, vsig, 0, tcam->attr.flags,
				      vl_msk, dc_msk, nm_msk);
	if (status)
		goto err_ice_prof_tcam_ena_dis;

	tcam->in_use = 1;

	p->type = ICE_TCAM_ADD;
	p->add_tcam_idx = true;
	p->prof_id = tcam->prof_id;
	p->ptg = tcam->ptg;
	p->vsig = 0;
	p->tcam_idx = tcam->tcam_idx;

	/* log change */
	list_add(&p->list_entry, chg);

	return 0;

err_ice_prof_tcam_ena_dis:
	devm_kfree(ice_hw_to_dev(hw), p);
	return status;
}

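/* Naming note for the mask triples used above and in ice_add_prof_id_vsig()
 * below: vl_msk, dc_msk and nm_msk are the "valid", "don't care" and
 * "never match" byte masks handed to ice_tcam_write_entry(). Note also that
 * the disable path never rewrites the key; it releases the TCAM index and
 * scrubs any not-yet-committed add for that index from the change list.
 */
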
/**
 * ice_adj_prof_priorities - adjust profile based on priorities
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: the VSIG for which to adjust profile priorities
 * @chg: the change list
 */
static enum ice_status
ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
			struct list_head *chg)
{
	DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
	struct ice_vsig_prof *t;
	enum ice_status status;
	u16 idx;

	bitmap_zero(ptgs_used, ICE_XLT1_CNT);
	idx = vsig & ICE_VSIG_IDX_M;

	/* Priority is based on the order in which the profiles are added. The
	 * newest added profile has the highest priority and the oldest added
	 * profile has the lowest priority. Since the profile property list for
	 * a VSIG is sorted from newest to oldest, this code traverses the list
	 * in order and enables the first of each PTG that it finds (that is
	 * not already enabled); it also disables any duplicate PTGs that it
	 * finds in the older profiles (that are currently enabled).
	 */

	list_for_each_entry(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
			    list) {
		u16 i;

		for (i = 0; i < t->tcam_count; i++) {
			/* Scan the priorities from newest to oldest.
			 * Make sure that the newest profiles take priority.
			 */
			if (test_bit(t->tcam[i].ptg, ptgs_used) &&
			    t->tcam[i].in_use) {
				/* need to mark this PTG as never-match, as it
				 * was already in use and therefore duplicate
				 * (and lower priority)
				 */
				status = ice_prof_tcam_ena_dis(hw, blk, false,
							       vsig,
							       &t->tcam[i],
							       chg);
				if (status)
					return status;
			} else if (!test_bit(t->tcam[i].ptg, ptgs_used) &&
				   !t->tcam[i].in_use) {
				/* need to enable this PTG, as it is not in use
				 * and not enabled (highest priority)
				 */
				status = ice_prof_tcam_ena_dis(hw, blk, true,
							       vsig,
							       &t->tcam[i],
							       chg);
				if (status)
					return status;
			}

			/* keep track of used ptgs */
			set_bit(t->tcam[i].ptg, ptgs_used);
		}
	}

	return 0;
}

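/* Worked example for the priority walk above (illustrative): suppose the
 * property list holds, newest first, profile P2 with one entry for PTG A
 * and profile P1 with entries for PTG A and PTG B, all currently in use.
 * P2's A entry is visited first and A is marked used. P1's A entry is then
 * a duplicate owned by an older (lower priority) profile, so it is
 * disabled; P1's B entry is the first seen for B and is left enabled as-is.
 */
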
/**
 * ice_add_prof_id_vsig - add profile to VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: the VSIG to which this profile is to be added
 * @hdl: the profile handle indicating the profile to add
 * @rev: true to add entries to the end of the list
 * @chg: the change list
 */
static enum ice_status
ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
		     bool rev, struct list_head *chg)
{
	/* Masks that ignore flags */
	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
	enum ice_status status = 0;
	struct ice_prof_map *map;
	struct ice_vsig_prof *t;
	struct ice_chs_chg *p;
	u16 vsig_idx, i;

	/* Error, if this VSIG already has this profile */
	if (ice_has_prof_vsig(hw, blk, vsig, hdl))
		return ICE_ERR_ALREADY_EXISTS;

	/* new VSIG profile structure */
	t = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*t), GFP_KERNEL);
	if (!t)
		return ICE_ERR_NO_MEMORY;

	mutex_lock(&hw->blk[blk].es.prof_map_lock);
	/* Get the details on the profile specified by the handle ID */
	map = ice_search_prof_id(hw, blk, hdl);
	if (!map) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto err_ice_add_prof_id_vsig;
	}

	t->profile_cookie = map->profile_cookie;
	t->prof_id = map->prof_id;
	t->tcam_count = map->ptg_cnt;

	/* create TCAM entries */
	for (i = 0; i < map->ptg_cnt; i++) {
		u16 tcam_idx;

		/* add TCAM to change list */
		p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
		if (!p) {
			status = ICE_ERR_NO_MEMORY;
			goto err_ice_add_prof_id_vsig;
		}

		/* allocate the TCAM entry index */
		/* for entries with empty attribute masks, allocate entry from
		 * the bottom of the TCAM table; otherwise, allocate from the
		 * top of the table in order to give it higher priority
		 */
		status = ice_alloc_tcam_ent(hw, blk, map->attr[i].mask == 0,
					    &tcam_idx);
		if (status) {
			devm_kfree(ice_hw_to_dev(hw), p);
			goto err_ice_add_prof_id_vsig;
		}

		t->tcam[i].ptg = map->ptg[i];
		t->tcam[i].prof_id = map->prof_id;
		t->tcam[i].tcam_idx = tcam_idx;
		t->tcam[i].attr = map->attr[i];
		t->tcam[i].in_use = true;

		p->type = ICE_TCAM_ADD;
		p->add_tcam_idx = true;
		p->prof_id = t->tcam[i].prof_id;
		p->ptg = t->tcam[i].ptg;
		p->vsig = vsig;
		p->tcam_idx = t->tcam[i].tcam_idx;

		/* write the TCAM entry */
		status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
					      t->tcam[i].prof_id,
					      t->tcam[i].ptg, vsig, 0, 0,
					      vl_msk, dc_msk, nm_msk);
		if (status) {
			devm_kfree(ice_hw_to_dev(hw), p);
			goto err_ice_add_prof_id_vsig;
		}

		/* log change */
		list_add(&p->list_entry, chg);
	}

	/* add profile to VSIG */
	vsig_idx = vsig & ICE_VSIG_IDX_M;
	if (rev)
		list_add_tail(&t->list,
			      &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
	else
		list_add(&t->list,
			 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);

	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
	return status;

err_ice_add_prof_id_vsig:
	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
	/* let caller clean up the change list */
	devm_kfree(ice_hw_to_dev(hw), t);
	return status;
}

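/* A note on @rev above: ice_create_vsig_from_lst() walks a newest-to-oldest
 * copy of a property list and re-adds each profile with rev == true.
 * Appending at the tail while reading the source list from the head
 * preserves the original priority order in the new VSIG.
 */
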
/**
 * ice_create_prof_id_vsig - add a new VSIG with a single profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the initial VSI that will be in VSIG
 * @hdl: the profile handle of the profile that will be added to the VSIG
 * @chg: the change list
 */
static enum ice_status
ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
			struct list_head *chg)
{
	enum ice_status status;
	struct ice_chs_chg *p;
	u16 new_vsig;

	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
	if (!p)
		return ICE_ERR_NO_MEMORY;

	new_vsig = ice_vsig_alloc(hw, blk);
	if (!new_vsig) {
		status = ICE_ERR_HW_TABLE;
		goto err_ice_create_prof_id_vsig;
	}

	status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
	if (status)
		goto err_ice_create_prof_id_vsig;

	status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
	if (status)
		goto err_ice_create_prof_id_vsig;

	p->type = ICE_VSIG_ADD;
	p->vsi = vsi;
	p->orig_vsig = ICE_DEFAULT_VSIG;
	p->vsig = new_vsig;

	list_add(&p->list_entry, chg);

	return 0;

err_ice_create_prof_id_vsig:
	/* let caller clean up the change list */
	devm_kfree(ice_hw_to_dev(hw), p);
	return status;
}

/**
 * ice_create_vsig_from_lst - create a new VSIG with a list of profiles
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the initial VSI that will be in VSIG
 * @lst: the list of profiles that will be added to the VSIG
 * @new_vsig: return of new VSIG
 * @chg: the change list
 */
static enum ice_status
ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
			 struct list_head *lst, u16 *new_vsig,
			 struct list_head *chg)
{
	struct ice_vsig_prof *t;
	enum ice_status status;
	u16 vsig;

	vsig = ice_vsig_alloc(hw, blk);
	if (!vsig)
		return ICE_ERR_HW_TABLE;

	status = ice_move_vsi(hw, blk, vsi, vsig, chg);
	if (status)
		return status;

	list_for_each_entry(t, lst, list) {
		/* Reverse the order here since we are copying the list */
		status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
					      true, chg);
		if (status)
			return status;
	}

	*new_vsig = vsig;

	return 0;
}

/**
 * ice_find_prof_vsig - find a VSIG with a specific profile handle
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @hdl: the profile handle of the profile to search for
 * @vsig: returns the VSIG with the matching profile
 */
static bool
ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
{
	struct ice_vsig_prof *t;
	enum ice_status status;
	struct list_head lst;

	INIT_LIST_HEAD(&lst);

	/* build a temporary single-entry list so the duplicate-properties
	 * search can be reused for a single-profile lookup
	 */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return false;

	t->profile_cookie = hdl;
	list_add(&t->list, &lst);

	status = ice_find_dup_props_vsig(hw, blk, &lst, vsig);

	list_del(&t->list);
	kfree(t);

	return !status;
}

/**
 * ice_add_prof_id_flow - add profile flow
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the VSI to enable with the profile specified by ID
 * @hdl: profile handle
 *
 * Calling this function will update the hardware tables to enable the
 * profile indicated by the ID parameter for the VSI specified by the @vsi
 * parameter. Once successfully called, the flow will be enabled.
 */
enum ice_status
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
{
	struct ice_vsig_prof *tmp1, *del1;
	struct ice_chs_chg *tmp, *del;
	struct list_head union_lst;
	enum ice_status status;
	struct list_head chg;
	u16 vsig;

	INIT_LIST_HEAD(&union_lst);
	INIT_LIST_HEAD(&chg);

	/* Get profile */
	status = ice_get_prof(hw, blk, hdl, &chg);
	if (status)
		return status;

	/* determine if VSI is already part of a VSIG */
	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
	if (!status && vsig) {
		bool only_vsi;
		u16 or_vsig;
		u16 ref;

		/* found in VSIG */
		or_vsig = vsig;

		/* make sure that there is no overlap/conflict between the new
		 * characteristics and the existing ones; we don't support that
		 * scenario
		 */
		if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
			status = ICE_ERR_ALREADY_EXISTS;
			goto err_ice_add_prof_id_flow;
		}

		/* last VSI in the VSIG? */
		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
		if (status)
			goto err_ice_add_prof_id_flow;
		only_vsi = (ref == 1);

		/* create a union of the current profiles and the one being
		 * added
		 */
		status = ice_get_profs_vsig(hw, blk, vsig, &union_lst);
		if (status)
			goto err_ice_add_prof_id_flow;

		status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl);
		if (status)
			goto err_ice_add_prof_id_flow;

		/* search for an existing VSIG with an exact characteristics
		 * match
		 */
		status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
		if (!status) {
			/* move VSI to the VSIG that matches */
			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
			if (status)
				goto err_ice_add_prof_id_flow;

			/* VSI has been moved out of or_vsig. If the or_vsig
			 * had only that VSI it is now empty and can be
			 * removed.
			 */
			if (only_vsi) {
				status = ice_rem_vsig(hw, blk, or_vsig, &chg);
				if (status)
					goto err_ice_add_prof_id_flow;
			}
		} else if (only_vsi) {
			/* If the original VSIG only contains one VSI, then it
			 * will be the requesting VSI. In this case the VSI is
			 * not sharing entries and we can simply add the new
			 * profile to the VSIG.
			 */
			status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
						      &chg);
			if (status)
				goto err_ice_add_prof_id_flow;

			/* Adjust priorities */
			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
			if (status)
				goto err_ice_add_prof_id_flow;
		} else {
			/* No match, so we need a new VSIG */
			status = ice_create_vsig_from_lst(hw, blk, vsi,
							  &union_lst, &vsig,
							  &chg);
			if (status)
				goto err_ice_add_prof_id_flow;

			/* Adjust priorities */
			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
			if (status)
				goto err_ice_add_prof_id_flow;
		}
	} else {
		/* need to find or add a VSIG */
		/* search for an existing VSIG with an exact characteristics
		 * match
		 */
		if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) {
			/* found an exact match; add or move the VSI to the
			 * VSIG that matches
			 */
			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
			if (status)
				goto err_ice_add_prof_id_flow;
		} else {
			/* we did not find an exact match, so we need to add
			 * a new VSIG
			 */
			status = ice_create_prof_id_vsig(hw, blk, vsi, hdl,
							 &chg);
			if (status)
				goto err_ice_add_prof_id_flow;
		}
	}

	/* update hardware */
	if (!status)
		status = ice_upd_prof_hw(hw, blk, &chg);

err_ice_add_prof_id_flow:
	list_for_each_entry_safe(del, tmp, &chg, list_entry) {
		list_del(&del->list_entry);
		devm_kfree(ice_hw_to_dev(hw), del);
	}

	list_for_each_entry_safe(del1, tmp1, &union_lst, list) {
		list_del(&del1->list);
		devm_kfree(ice_hw_to_dev(hw), del1);
	}

	return status;
}

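/* Typical call sequence (illustrative only; 'vsi' is the hardware VSI
 * number and 'hdl' the tracking cookie registered with ice_add_prof()).
 * VSIs that end up with identical profile sets are collapsed into a shared
 * VSIG automatically:
 *
 *	status = ice_add_prof_id_flow(hw, ICE_BLK_RSS, vsi, hdl);
 */
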
/**
 * ice_rem_prof_from_list - remove a profile from list
 * @hw: pointer to the HW struct
 * @lst: list to remove the profile from
 * @hdl: the profile handle indicating the profile to remove
 */
static enum ice_status
ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl)
{
	struct ice_vsig_prof *ent, *tmp;

	list_for_each_entry_safe(ent, tmp, lst, list)
		if (ent->profile_cookie == hdl) {
			list_del(&ent->list);
			devm_kfree(ice_hw_to_dev(hw), ent);
			return 0;
		}

	return ICE_ERR_DOES_NOT_EXIST;
}

/**
 * ice_rem_prof_id_flow - remove flow
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the VSI from which to remove the profile specified by ID
 * @hdl: profile tracking handle
 *
 * Calling this function will update the hardware tables to remove the
 * profile indicated by the ID parameter for the VSI specified by the @vsi
 * parameter. Once successfully called, the flow will be disabled.
 */
enum ice_status
ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
{
	struct ice_vsig_prof *tmp1, *del1;
	struct ice_chs_chg *tmp, *del;
	struct list_head chg, copy;
	enum ice_status status;
	u16 vsig;

	INIT_LIST_HEAD(&copy);
	INIT_LIST_HEAD(&chg);

	/* determine if VSI is already part of a VSIG */
	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
	if (!status && vsig) {
		bool last_profile;
		bool only_vsi;
		u16 ref;

		/* found in VSIG */
		last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1;
		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
		if (status)
			goto err_ice_rem_prof_id_flow;
		only_vsi = (ref == 1);

		if (only_vsi) {
			/* If the original VSIG only contains one reference,
			 * which will be the requesting VSI, then the VSI is
			 * not sharing entries and we can simply remove the
			 * specific characteristics from the VSIG.
			 */

			if (last_profile) {
				/* If there are no profiles left for this VSIG,
				 * then simply remove the VSIG.
				 */
				status = ice_rem_vsig(hw, blk, vsig, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			} else {
				status = ice_rem_prof_id_vsig(hw, blk, vsig,
							      hdl, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;

				/* Adjust priorities */
				status = ice_adj_prof_priorities(hw, blk, vsig,
								 &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			}

		} else {
			/* Make a copy of the VSIG's list of profiles */
			status = ice_get_profs_vsig(hw, blk, vsig, &copy);
			if (status)
				goto err_ice_rem_prof_id_flow;

			/* Remove specified profile entry from the list */
			status = ice_rem_prof_from_list(hw, &copy, hdl);
			if (status)
				goto err_ice_rem_prof_id_flow;

			if (list_empty(&copy)) {
				status = ice_move_vsi(hw, blk, vsi,
						      ICE_DEFAULT_VSIG, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;

			} else if (!ice_find_dup_props_vsig(hw, blk, &copy,
							    &vsig)) {
				/* found a VSIG with a matching profile list;
				 * move the VSI to that VSIG
				 */
				status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			} else {
				/* since no existing VSIG supports this
				 * characteristic pattern, we need to create a
				 * new VSIG and TCAM entries
				 */
				status = ice_create_vsig_from_lst(hw, blk, vsi,
								  &copy, &vsig,
								  &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;

				/* Adjust priorities */
				status = ice_adj_prof_priorities(hw, blk, vsig,
								 &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			}
		}
	} else {
		status = ICE_ERR_DOES_NOT_EXIST;
	}

	/* update hardware tables */
	if (!status)
		status = ice_upd_prof_hw(hw, blk, &chg);

err_ice_rem_prof_id_flow:
	list_for_each_entry_safe(del, tmp, &chg, list_entry) {
		list_del(&del->list_entry);
		devm_kfree(ice_hw_to_dev(hw), del);
	}

	list_for_each_entry_safe(del1, tmp1, &copy, list) {
		list_del(&del1->list);
		devm_kfree(ice_hw_to_dev(hw), del1);
	}

	return status;
}

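/* Removal is the mirror image of ice_add_prof_id_flow() (illustrative):
 *
 *	status = ice_rem_prof_id_flow(hw, ICE_BLK_RSS, vsi, hdl);
 *
 * The VSI is re-homed to whichever VSIG matches its remaining profile set,
 * falling back to ICE_DEFAULT_VSIG when no profiles remain.
 */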