1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2007 - 2018 Intel Corporation. */
3
4 #include <linux/bitfield.h>
5 #include <linux/if_ether.h>
6 #include <linux/delay.h>
7 #include <linux/pci.h>
8 #include <linux/netdevice.h>
9 #include <linux/etherdevice.h>
10
11 #include "e1000_mac.h"
12
13 #include "igb.h"
14
15 static s32 igb_set_default_fc(struct e1000_hw *hw);
16 static void igb_set_fc_watermarks(struct e1000_hw *hw);
17
18 /**
19 * igb_get_bus_info_pcie - Get PCIe bus information
20 * @hw: pointer to the HW structure
21 *
22 * Determines and stores the system bus information for a particular
23 * network interface. The following bus information is determined and stored:
24 * bus speed, bus width, type (PCIe), and PCIe function.
25 **/
26 s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
27 {
28 struct e1000_bus_info *bus = &hw->bus;
29 s32 ret_val;
30 u32 reg;
31 u16 pcie_link_status;
32
33 bus->type = e1000_bus_type_pci_express;
34
35 ret_val = igb_read_pcie_cap_reg(hw,
36 PCI_EXP_LNKSTA,
37 &pcie_link_status);
38 if (ret_val) {
39 bus->width = e1000_bus_width_unknown;
40 bus->speed = e1000_bus_speed_unknown;
41 } else {
42 switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
43 case PCI_EXP_LNKSTA_CLS_2_5GB:
44 bus->speed = e1000_bus_speed_2500;
45 break;
46 case PCI_EXP_LNKSTA_CLS_5_0GB:
47 bus->speed = e1000_bus_speed_5000;
48 break;
49 default:
50 bus->speed = e1000_bus_speed_unknown;
51 break;
52 }
53
54 bus->width = (enum e1000_bus_width)FIELD_GET(PCI_EXP_LNKSTA_NLW,
55 pcie_link_status);
56 }
57
58 reg = rd32(E1000_STATUS);
59 bus->func = FIELD_GET(E1000_STATUS_FUNC_MASK, reg);
60
61 return 0;
62 }
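/* Illustrative sketch, not driver code: FIELD_GET() extracts a bitfield using
 * only the mask.  Assuming PCI_EXP_LNKSTA_NLW is the usual 0x03f0
 * negotiated-link-width mask, a link status value of 0x0041 decodes as
 *
 *	FIELD_GET(PCI_EXP_LNKSTA_NLW, 0x0041) == (0x0041 & 0x03f0) >> 4 == 4
 *
 * i.e. e1000_bus_width_pcie_x4, while the low CLS bits (0x1) select
 * e1000_bus_speed_2500 in the switch above.
 */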
63
64 /**
65 * igb_clear_vfta - Clear VLAN filter table
66 * @hw: pointer to the HW structure
67 *
68 * Clears the register array which contains the VLAN filter table by
69 * setting all the values to 0.
70 **/
71 void igb_clear_vfta(struct e1000_hw *hw)
72 {
73 u32 offset;
74
75 for (offset = E1000_VLAN_FILTER_TBL_SIZE; offset--;)
76 hw->mac.ops.write_vfta(hw, offset, 0);
77 }
78
79 /**
80 * igb_write_vfta - Write value to VLAN filter table
81 * @hw: pointer to the HW structure
82 * @offset: register offset in VLAN filter table
83 * @value: register value written to VLAN filter table
84 *
85 * Writes value at the given offset in the register array which stores
86 * the VLAN filter table.
87 **/
88 void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
89 {
90 struct igb_adapter *adapter = hw->back;
91
92 array_wr32(E1000_VFTA, offset, value);
93 wrfl();
94
95 adapter->shadow_vfta[offset] = value;
96 }
97
98 /**
99 * igb_init_rx_addrs - Initialize receive addresses
100 * @hw: pointer to the HW structure
101 * @rar_count: number of receive address registers
102 *
103 * Sets up the receive address registers by setting the base receive address
104 * register to the device's MAC address and clearing all the other receive
105 * address registers to 0.
106 **/
107 void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
108 {
109 u32 i;
110 u8 mac_addr[ETH_ALEN] = {0};
111
112 /* Setup the receive address */
113 hw_dbg("Programming MAC Address into RAR[0]\n");
114
115 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
116
117 /* Zero out the other (rar_entry_count - 1) receive addresses */
118 hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);
119 for (i = 1; i < rar_count; i++)
120 hw->mac.ops.rar_set(hw, mac_addr, i);
121 }
122
123 /**
124 * igb_find_vlvf_slot - find the VLAN id or the first empty slot
125 * @hw: pointer to hardware structure
126 * @vlan: VLAN id to write to VLAN filter
127 * @vlvf_bypass: skip VLVF if no match is found
128 *
129 * return the VLVF index where this VLAN id should be placed
130 *
131 **/
132 static s32 igb_find_vlvf_slot(struct e1000_hw *hw, u32 vlan, bool vlvf_bypass)
133 {
134 s32 regindex, first_empty_slot;
135 u32 bits;
136
137 /* short cut the special case */
138 if (vlan == 0)
139 return 0;
140
141 /* if vlvf_bypass is set we don't want to use an empty slot, we
142 * will simply bypass the VLVF if there are no entries present in the
143 * VLVF that contain our VLAN
144 */
145 first_empty_slot = vlvf_bypass ? -E1000_ERR_NO_SPACE : 0;
146
147 /* Search for the VLAN id in the VLVF entries. Save off the first empty
148 * slot found along the way.
149 *
150 * pre-decrement loop covering (E1000_VLVF_ARRAY_SIZE - 1) .. 1
151 */
152 for (regindex = E1000_VLVF_ARRAY_SIZE; --regindex > 0;) {
153 bits = rd32(E1000_VLVF(regindex)) & E1000_VLVF_VLANID_MASK;
154 if (bits == vlan)
155 return regindex;
156 if (!first_empty_slot && !bits)
157 first_empty_slot = regindex;
158 }
159
160 return first_empty_slot ? : -E1000_ERR_NO_SPACE;
161 }
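/* Editor's note, illustrative only: the "?:" form in the return above is the
 * GNU C conditional with an omitted middle operand.  Written out in full it
 * is equivalent to:
 *
 *	return first_empty_slot ? first_empty_slot : -E1000_ERR_NO_SPACE;
 *
 * i.e. return the saved empty slot if one was found, otherwise report that
 * the VLVF has no free space.
 */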
162
163 /**
164 * igb_vfta_set - enable or disable vlan in VLAN filter table
165 * @hw: pointer to the HW structure
166 * @vlan: VLAN id to add or remove
167 * @vind: VMDq output index that maps queue to VLAN id
168 * @vlan_on: if true add filter, if false remove
169 * @vlvf_bypass: skip VLVF if no match is found
170 *
171 * Sets or clears a bit in the VLAN filter table array based on VLAN id
172 * and if we are adding or removing the filter
173 **/
174 s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind,
175 bool vlan_on, bool vlvf_bypass)
176 {
177 struct igb_adapter *adapter = hw->back;
178 u32 regidx, vfta_delta, vfta, bits;
179 s32 vlvf_index;
180
181 if ((vlan > 4095) || (vind > 7))
182 return -E1000_ERR_PARAM;
183
184 /* this is a 2 part operation - first the VFTA, then the
185 * VLVF and VLVFB if VT Mode is set
186 * We don't write the VFTA until we know the VLVF part succeeded.
187 */
188
189 /* Part 1
190 * The VFTA is a bitstring made up of 128 32-bit registers
191 * that enable the particular VLAN id, much like the MTA:
192 * bits[11-5]: which register
193 * bits[4-0]: which bit in the register
194 */
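	/* Worked example (illustrative): for VLAN ID 1234,
	 *	regidx     = 1234 / 32 = 38
	 *	vfta_delta = BIT(1234 % 32) = BIT(18)
	 * so adding that VLAN sets bit 18 of VFTA register 38.
	 */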
195 regidx = vlan / 32;
196 vfta_delta = BIT(vlan % 32);
197 vfta = adapter->shadow_vfta[regidx];
198
199 /* vfta_delta represents the difference between the current value
200 * of vfta and the value we want in the register. Since the diff
201 * is an XOR mask we can just update vfta using an XOR.
202 */
203 vfta_delta &= vlan_on ? ~vfta : vfta;
204 vfta ^= vfta_delta;
205
206 /* Part 2
207 * If VT Mode is set
208 * Either vlan_on
209 * make sure the VLAN is in VLVF
210 * set the vind bit in the matching VLVFB
211 * Or !vlan_on
212 * clear the pool bit and possibly the vind
213 */
214 if (!adapter->vfs_allocated_count)
215 goto vfta_update;
216
217 vlvf_index = igb_find_vlvf_slot(hw, vlan, vlvf_bypass);
218 if (vlvf_index < 0) {
219 if (vlvf_bypass)
220 goto vfta_update;
221 return vlvf_index;
222 }
223
224 bits = rd32(E1000_VLVF(vlvf_index));
225
226 /* set the pool bit */
227 bits |= BIT(E1000_VLVF_POOLSEL_SHIFT + vind);
228 if (vlan_on)
229 goto vlvf_update;
230
231 /* clear the pool bit */
232 bits ^= BIT(E1000_VLVF_POOLSEL_SHIFT + vind);
233
234 if (!(bits & E1000_VLVF_POOLSEL_MASK)) {
235 /* Clear VFTA first, then disable VLVF. Otherwise
236 * we run the risk of stray packets leaking into
237 * the PF via the default pool
238 */
239 if (vfta_delta)
240 hw->mac.ops.write_vfta(hw, regidx, vfta);
241
242 /* disable VLVF and clear remaining bit from pool */
243 wr32(E1000_VLVF(vlvf_index), 0);
244
245 return 0;
246 }
247
248 /* If there are still bits set in the VLVFB registers
249 * for the VLAN ID indicated we need to see if the
250 * caller is requesting that we clear the VFTA entry bit.
251 * If the caller has requested that we clear the VFTA
252 * entry bit but there are still pools/VFs using this VLAN
253 * ID entry then ignore the request. We're not worried
254 * about the case where we're turning the VFTA VLAN ID
255 * entry bit on, only when requested to turn it off as
256 * there may be multiple pools and/or VFs using the
257 * VLAN ID entry. In that case we cannot clear the
258 * VFTA bit until all pools/VFs using that VLAN ID have also
259 * been cleared. This will be indicated by "bits" being
260 * zero.
261 */
262 vfta_delta = 0;
263
264 vlvf_update:
265 /* record pool change and enable VLAN ID if not already enabled */
266 wr32(E1000_VLVF(vlvf_index), bits | vlan | E1000_VLVF_VLANID_ENABLE);
267
268 vfta_update:
269 /* bit was set/cleared before we started */
270 if (vfta_delta)
271 hw->mac.ops.write_vfta(hw, regidx, vfta);
272
273 return 0;
274 }
275
276 /**
277 * igb_check_alt_mac_addr - Check for alternate MAC addr
278 * @hw: pointer to the HW structure
279 *
280 * Checks the nvm for an alternate MAC address. An alternate MAC address
281 * can be set up by pre-boot software and must be treated like a permanent
282 * address, overriding the actual permanent MAC address. If an
283 * alternate MAC address is found it is saved in the hw struct and
284 * programmed into RAR0 and the function returns success, otherwise the
285 * function returns an error.
286 **/
287 s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
288 {
289 u32 i;
290 s32 ret_val = 0;
291 u16 offset, nvm_alt_mac_addr_offset, nvm_data;
292 u8 alt_mac_addr[ETH_ALEN];
293
294 /* Alternate MAC address is handled by the option ROM for 82580
295 * and newer. SW support not required.
296 */
297 if (hw->mac.type >= e1000_82580)
298 goto out;
299
300 ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
301 &nvm_alt_mac_addr_offset);
302 if (ret_val) {
303 hw_dbg("NVM Read Error\n");
304 goto out;
305 }
306
307 if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
308 (nvm_alt_mac_addr_offset == 0x0000))
309 /* There is no Alternate MAC Address */
310 goto out;
311
312 if (hw->bus.func == E1000_FUNC_1)
313 nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
314 if (hw->bus.func == E1000_FUNC_2)
315 nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2;
316
317 if (hw->bus.func == E1000_FUNC_3)
318 nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3;
319 for (i = 0; i < ETH_ALEN; i += 2) {
320 offset = nvm_alt_mac_addr_offset + (i >> 1);
321 ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
322 if (ret_val) {
323 hw_dbg("NVM Read Error\n");
324 goto out;
325 }
326
327 alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
328 alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
329 }
330
331 /* if multicast bit is set, the alternate address will not be used */
332 if (is_multicast_ether_addr(alt_mac_addr)) {
333 hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
334 goto out;
335 }
336
337 /* We have a valid alternate MAC address, and we want to treat it the
338 * same as the normal permanent MAC address stored by the HW into the
339 * RAR. Do this by mapping this address into RAR0.
340 */
341 hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
342
343 out:
344 return ret_val;
345 }
346
347 /**
348 * igb_rar_set - Set receive address register
349 * @hw: pointer to the HW structure
350 * @addr: pointer to the receive address
351 * @index: receive address array register
352 *
353 * Sets the receive address array register at index to the address passed
354 * in by addr.
355 **/
356 void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
357 {
358 u32 rar_low, rar_high;
359
360 /* HW expects these in little endian so we reverse the byte order
361 * from network order (big endian) to little endian
362 */
363 rar_low = ((u32) addr[0] |
364 ((u32) addr[1] << 8) |
365 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
366
367 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
368
369 /* If MAC address zero, no need to set the AV bit */
370 if (rar_low || rar_high)
371 rar_high |= E1000_RAH_AV;
372
373 /* Some bridges will combine consecutive 32-bit writes into
374 * a single burst write, which will malfunction on some parts.
375 * The flushes avoid this.
376 */
377 wr32(E1000_RAL(index), rar_low);
378 wrfl();
379 wr32(E1000_RAH(index), rar_high);
380 wrfl();
381 }
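/* Worked example (illustrative): for the MAC address 00:1b:21:aa:bb:cc the
 * packing above yields
 *	rar_low  = 0x00 | 0x1b << 8 | 0x21 << 16 | 0xaa << 24 = 0xaa211b00
 *	rar_high = 0xbb | 0xcc << 8                           = 0x0000ccbb
 * and, because the address is non-zero, E1000_RAH_AV is OR'd into rar_high
 * before it is written to E1000_RAH(index).
 */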
382
383 /**
384 * igb_mta_set - Set multicast filter table address
385 * @hw: pointer to the HW structure
386 * @hash_value: determines the MTA register and bit to set
387 *
388 * The multicast table address is a register array of 32-bit registers.
389 * The hash_value is used to determine what register the bit is in, the
390 * current value is read, the new bit is OR'd in and the new value is
391 * written back into the register.
392 **/
393 void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
394 {
395 u32 hash_bit, hash_reg, mta;
396
397 /* The MTA is a register array of 32-bit registers. It is
398 * treated like an array of (32*mta_reg_count) bits. We want to
399 * set bit BitArray[hash_value]. So we figure out what register
400 * the bit is in, read it, OR in the new bit, then write
401 * back the new value. The (hw->mac.mta_reg_count - 1) serves as a
402 * mask to bits 31:5 of the hash value which gives us the
403 * register we're modifying. The hash bit within that register
404 * is determined by the lower 5 bits of the hash value.
405 */
406 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
407 hash_bit = hash_value & 0x1F;
408
409 mta = array_rd32(E1000_MTA, hash_reg);
410
411 mta |= BIT(hash_bit);
412
413 array_wr32(E1000_MTA, hash_reg, mta);
414 wrfl();
415 }
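/* Worked example (illustrative): with mta_reg_count = 128 and a hash value of
 * 0x563 (the case 0 example in igb_hash_mc_addr() below),
 *	hash_reg = (0x563 >> 5) & 0x7F = 43
 *	hash_bit = 0x563 & 0x1F        = 3
 * so the filter sets bit 3 of MTA register 43.
 */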
416
417 /**
418 * igb_hash_mc_addr - Generate a multicast hash value
419 * @hw: pointer to the HW structure
420 * @mc_addr: pointer to a multicast address
421 *
422 * Generates a multicast address hash value which is used to determine
423 * the multicast filter table array address and new table value. See
424 * igb_mta_set()
425 **/
426 static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
427 {
428 u32 hash_value, hash_mask;
429 u8 bit_shift = 1;
430
431 /* Register count multiplied by bits per register */
432 hash_mask = (hw->mac.mta_reg_count * 32) - 1;
433
434 /* For a mc_filter_type of 0, bit_shift is the number of left-shifts
435 * where 0xFF would still fall within the hash mask.
436 */
437 while (hash_mask >> bit_shift != 0xFF && bit_shift < 4)
438 bit_shift++;
439
440 /* The portion of the address that is used for the hash table
441 * is determined by the mc_filter_type setting.
442 * The algorithm is such that there is a total of 8 bits of shifting.
443 * The bit_shift for a mc_filter_type of 0 represents the number of
444 * left-shifts where the MSB of mc_addr[5] would still fall within
445 * the hash_mask. Case 0 does this exactly. Since there are a total
446 * of 8 bits of shifting, then mc_addr[4] will shift right the
447 * remaining number of bits. Thus 8 - bit_shift. The rest of the
448 * cases are a variation of this algorithm...essentially raising the
449 * number of bits to shift mc_addr[5] left, while still keeping the
450 * 8-bit shifting total.
451 *
452 * For example, given the following Destination MAC Address and an
453 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
454 * we can see that the bit_shift for case 0 is 4. These are the hash
455 * values resulting from each mc_filter_type...
456 * [0] [1] [2] [3] [4] [5]
457 * 01 AA 00 12 34 56
458 * LSB MSB
459 *
460 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
461 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
462 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
463 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
464 */
465 switch (hw->mac.mc_filter_type) {
466 default:
467 case 0:
468 break;
469 case 1:
470 bit_shift += 1;
471 break;
472 case 2:
473 bit_shift += 2;
474 break;
475 case 3:
476 bit_shift += 4;
477 break;
478 }
479
480 hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
481 (((u16) mc_addr[5]) << bit_shift)));
482
483 return hash_value;
484 }
485
486 /**
487 * igb_i21x_hw_doublecheck - double checks potential HW issue in i21X
488 * @hw: pointer to the HW structure
489 *
490 * Checks if the multicast table array was written correctly.
491 * If not, rewrites it to the registers.
492 **/
493 static void igb_i21x_hw_doublecheck(struct e1000_hw *hw)
494 {
495 int failed_cnt = 3;
496 bool is_failed;
497 int i;
498
499 do {
500 is_failed = false;
501 for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) {
502 if (array_rd32(E1000_MTA, i) != hw->mac.mta_shadow[i]) {
503 is_failed = true;
504 array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
505 wrfl();
506 }
507 }
508 if (is_failed && --failed_cnt <= 0) {
509 hw_dbg("Failed to update MTA_REGISTER, too many retries");
510 break;
511 }
512 } while (is_failed);
513 }
514
515 /**
516 * igb_update_mc_addr_list - Update Multicast addresses
517 * @hw: pointer to the HW structure
518 * @mc_addr_list: array of multicast addresses to program
519 * @mc_addr_count: number of multicast addresses to program
520 *
521 * Updates entire Multicast Table Array.
522 * The caller must have a packed mc_addr_list of multicast addresses.
523 **/
524 void igb_update_mc_addr_list(struct e1000_hw *hw,
525 u8 *mc_addr_list, u32 mc_addr_count)
526 {
527 u32 hash_value, hash_bit, hash_reg;
528 int i;
529
530 /* clear mta_shadow */
531 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
532
533 /* update mta_shadow from mc_addr_list */
534 for (i = 0; (u32) i < mc_addr_count; i++) {
535 hash_value = igb_hash_mc_addr(hw, mc_addr_list);
536
537 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
538 hash_bit = hash_value & 0x1F;
539
540 hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit);
541 mc_addr_list += (ETH_ALEN);
542 }
543
544 /* replace the entire MTA table */
545 for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
546 array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
547 wrfl();
548 if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211)
549 igb_i21x_hw_doublecheck(hw);
550 }
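/* Usage sketch (illustrative, not driver code): the caller passes a packed
 * array of 6-byte addresses, e.g. two multicast addresses back to back:
 *
 *	u8 mc_list[2 * ETH_ALEN] = {
 *		0x01, 0x00, 0x5e, 0x00, 0x00, 0x01,
 *		0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb,
 *	};
 *
 *	igb_update_mc_addr_list(hw, mc_list, 2);
 *
 * In the driver proper the packed list is built from the netdev multicast
 * list (see igb_main.c); the snippet above only shows the expected packing.
 */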
551
552 /**
553 * igb_clear_hw_cntrs_base - Clear base hardware counters
554 * @hw: pointer to the HW structure
555 *
556 * Clears the base hardware counters by reading the counter registers.
557 **/
558 void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
559 {
560 rd32(E1000_CRCERRS);
561 rd32(E1000_SYMERRS);
562 rd32(E1000_MPC);
563 rd32(E1000_SCC);
564 rd32(E1000_ECOL);
565 rd32(E1000_MCC);
566 rd32(E1000_LATECOL);
567 rd32(E1000_COLC);
568 rd32(E1000_DC);
569 rd32(E1000_SEC);
570 rd32(E1000_RLEC);
571 rd32(E1000_XONRXC);
572 rd32(E1000_XONTXC);
573 rd32(E1000_XOFFRXC);
574 rd32(E1000_XOFFTXC);
575 rd32(E1000_FCRUC);
576 rd32(E1000_GPRC);
577 rd32(E1000_BPRC);
578 rd32(E1000_MPRC);
579 rd32(E1000_GPTC);
580 rd32(E1000_GORCL);
581 rd32(E1000_GORCH);
582 rd32(E1000_GOTCL);
583 rd32(E1000_GOTCH);
584 rd32(E1000_RNBC);
585 rd32(E1000_RUC);
586 rd32(E1000_RFC);
587 rd32(E1000_ROC);
588 rd32(E1000_RJC);
589 rd32(E1000_TORL);
590 rd32(E1000_TORH);
591 rd32(E1000_TOTL);
592 rd32(E1000_TOTH);
593 rd32(E1000_TPR);
594 rd32(E1000_TPT);
595 rd32(E1000_MPTC);
596 rd32(E1000_BPTC);
597 }
598
599 /**
600 * igb_check_for_copper_link - Check for link (Copper)
601 * @hw: pointer to the HW structure
602 *
603 * Checks to see if the link status of the hardware has changed. If a
604 * change in link status has been detected, then we read the PHY registers
605 * to get the current speed/duplex if link exists.
606 **/
607 s32 igb_check_for_copper_link(struct e1000_hw *hw)
608 {
609 struct e1000_mac_info *mac = &hw->mac;
610 s32 ret_val;
611 bool link;
612
613 /* We only want to go out to the PHY registers to see if Auto-Neg
614 * has completed and/or if our link status has changed. The
615 * get_link_status flag is set upon receiving a Link Status
616 * Change or Rx Sequence Error interrupt.
617 */
618 if (!mac->get_link_status) {
619 ret_val = 0;
620 goto out;
621 }
622
623 /* First we want to see if the MII Status Register reports
624 * link. If so, then we want to get the current speed/duplex
625 * of the PHY.
626 */
627 ret_val = igb_phy_has_link(hw, 1, 0, &link);
628 if (ret_val)
629 goto out;
630
631 if (!link)
632 goto out; /* No link detected */
633
634 mac->get_link_status = false;
635
636 /* Check if there was DownShift, must be checked
637 * immediately after link-up
638 */
639 igb_check_downshift(hw);
640
641 /* If we are forcing speed/duplex, then we simply return since
642 * we have already determined whether we have link or not.
643 */
644 if (!mac->autoneg) {
645 ret_val = -E1000_ERR_CONFIG;
646 goto out;
647 }
648
649 /* Auto-Neg is enabled. Auto Speed Detection takes care
650 * of MAC speed/duplex configuration. So we only need to
651 * configure Collision Distance in the MAC.
652 */
653 igb_config_collision_dist(hw);
654
655 /* Configure Flow Control now that Auto-Neg has completed.
656 * First, we need to restore the desired flow control
657 * settings because we may have had to re-autoneg with a
658 * different link partner.
659 */
660 ret_val = igb_config_fc_after_link_up(hw);
661 if (ret_val)
662 hw_dbg("Error configuring flow control\n");
663
664 out:
665 return ret_val;
666 }
667
668 /**
669 * igb_setup_link - Setup flow control and link settings
670 * @hw: pointer to the HW structure
671 *
672 * Determines which flow control settings to use, then configures flow
673 * control. Calls the appropriate media-specific link configuration
674 * function. Assuming the adapter has a valid link partner, a valid link
675 * should be established. Assumes the hardware has previously been reset
676 * and the transmitter and receiver are not enabled.
677 **/
678 s32 igb_setup_link(struct e1000_hw *hw)
679 {
680 s32 ret_val = 0;
681
682 /* In the case of the phy reset being blocked, we already have a link.
683 * We do not need to set it up again.
684 */
685 if (igb_check_reset_block(hw))
686 goto out;
687
688 /* If requested flow control is set to default, set flow control
689 * based on the EEPROM flow control settings.
690 */
691 if (hw->fc.requested_mode == e1000_fc_default) {
692 ret_val = igb_set_default_fc(hw);
693 if (ret_val)
694 goto out;
695 }
696
697 /* We want to save off the original Flow Control configuration just
698 * in case we get disconnected and then reconnected into a different
699 * hub or switch with different Flow Control capabilities.
700 */
701 hw->fc.current_mode = hw->fc.requested_mode;
702
703 hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);
704
705 /* Call the necessary media_type subroutine to configure the link. */
706 ret_val = hw->mac.ops.setup_physical_interface(hw);
707 if (ret_val)
708 goto out;
709
710 /* Initialize the flow control address, type, and PAUSE timer
711 * registers to their default values. This is done even if flow
712 * control is disabled, because it does not hurt anything to
713 * initialize these registers.
714 */
715 hw_dbg("Initializing the Flow Control address, type and timer regs\n");
716 wr32(E1000_FCT, FLOW_CONTROL_TYPE);
717 wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
718 wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
719
720 wr32(E1000_FCTTV, hw->fc.pause_time);
721
722 igb_set_fc_watermarks(hw);
723
724 out:
725
726 return ret_val;
727 }
728
729 /**
730 * igb_config_collision_dist - Configure collision distance
731 * @hw: pointer to the HW structure
732 *
733 * Configures the collision distance to the default value and is used
734 * during link setup. Currently no func pointer exists and all
735 * implementations are handled in the generic version of this function.
736 **/
737 void igb_config_collision_dist(struct e1000_hw *hw)
738 {
739 u32 tctl;
740
741 tctl = rd32(E1000_TCTL);
742
743 tctl &= ~E1000_TCTL_COLD;
744 tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
745
746 wr32(E1000_TCTL, tctl);
747 wrfl();
748 }
749
750 /**
751 * igb_set_fc_watermarks - Set flow control high/low watermarks
752 * @hw: pointer to the HW structure
753 *
754 * Sets the flow control high/low threshold (watermark) registers. If
755 * flow control XON frame transmission is enabled, then set XON frame
756 * transmission as well.
757 **/
758 static void igb_set_fc_watermarks(struct e1000_hw *hw)
759 {
760 u32 fcrtl = 0, fcrth = 0;
761
762 /* Set the flow control receive threshold registers. Normally,
763 * these registers will be set to a default threshold that may be
764 * adjusted later by the driver's runtime code. However, if the
765 * ability to transmit pause frames is not enabled, then these
766 * registers will be set to 0.
767 */
768 if (hw->fc.current_mode & e1000_fc_tx_pause) {
769 /* We need to set up the Receive Threshold high and low water
770 * marks as well as (optionally) enabling the transmission of
771 * XON frames.
772 */
773 fcrtl = hw->fc.low_water;
774 if (hw->fc.send_xon)
775 fcrtl |= E1000_FCRTL_XONE;
776
777 fcrth = hw->fc.high_water;
778 }
779 wr32(E1000_FCRTL, fcrtl);
780 wr32(E1000_FCRTH, fcrth);
781 }
782
783 /**
784 * igb_set_default_fc - Set flow control default values
785 * @hw: pointer to the HW structure
786 *
787 * Read the EEPROM for the default values for flow control and store the
788 * values.
789 **/
790 static s32 igb_set_default_fc(struct e1000_hw *hw)
791 {
792 s32 ret_val = 0;
793 u16 lan_offset;
794 u16 nvm_data;
795
796 /* Read and store word 0x0F of the EEPROM. This word contains bits
797 * that determine the hardware's default PAUSE (flow control) mode,
798 * a bit that determines whether the HW defaults to enabling or
799 * disabling auto-negotiation, and the direction of the
800 * SW defined pins. If there is no SW over-ride of the flow
801 * control setting, then the variable hw->fc will
802 * be initialized based on a value in the EEPROM.
803 */
804 if (hw->mac.type == e1000_i350)
805 lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
806 else
807 lan_offset = 0;
808
809 ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG + lan_offset,
810 1, &nvm_data);
811 if (ret_val) {
812 hw_dbg("NVM Read Error\n");
813 goto out;
814 }
815
816 if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
817 hw->fc.requested_mode = e1000_fc_none;
818 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR)
819 hw->fc.requested_mode = e1000_fc_tx_pause;
820 else
821 hw->fc.requested_mode = e1000_fc_full;
822
823 out:
824 return ret_val;
825 }
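/* Decoding sketch (illustrative): the two NVM word 0x0F pause bits read above
 * resolve as
 *	PAUSE = 0, ASM_DIR = 0  ->  e1000_fc_none
 *	PAUSE = 0, ASM_DIR = 1  ->  e1000_fc_tx_pause
 *	PAUSE = 1, ASM_DIR = x  ->  e1000_fc_full
 * which mirrors the if/else chain at the end of the function.
 */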
826
827 /**
828 * igb_force_mac_fc - Force the MAC's flow control settings
829 * @hw: pointer to the HW structure
830 *
831 * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
832 * device control register to reflect the adapter settings. TFCE and RFCE
833 * need to be explicitly set by software when a copper PHY is used because
834 * autonegotiation is managed by the PHY rather than the MAC. Software must
835 * also configure these bits when link is forced on a fiber connection.
836 **/
837 s32 igb_force_mac_fc(struct e1000_hw *hw)
838 {
839 u32 ctrl;
840 s32 ret_val = 0;
841
842 ctrl = rd32(E1000_CTRL);
843
844 /* Because we didn't get link via the internal auto-negotiation
845 * mechanism (we either forced link or we got link via PHY
846 * auto-neg), we have to manually enable/disable transmit and
847 * receive flow control.
848 *
849 * The "Case" statement below enables/disables flow control
850 * according to the "hw->fc.current_mode" parameter.
851 *
852 * The possible values of the "fc" parameter are:
853 * 0: Flow control is completely disabled
854 * 1: Rx flow control is enabled (we can receive pause
855 * frames but not send pause frames).
856 * 2: Tx flow control is enabled (we can send pause frames
857 * but we do not receive pause frames).
858 * 3: Both Rx and Tx flow control (symmetric) are enabled.
859 * other: No other values should be possible at this point.
860 */
861 hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);
862
863 switch (hw->fc.current_mode) {
864 case e1000_fc_none:
865 ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
866 break;
867 case e1000_fc_rx_pause:
868 ctrl &= (~E1000_CTRL_TFCE);
869 ctrl |= E1000_CTRL_RFCE;
870 break;
871 case e1000_fc_tx_pause:
872 ctrl &= (~E1000_CTRL_RFCE);
873 ctrl |= E1000_CTRL_TFCE;
874 break;
875 case e1000_fc_full:
876 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
877 break;
878 default:
879 hw_dbg("Flow control param set incorrectly\n");
880 ret_val = -E1000_ERR_CONFIG;
881 goto out;
882 }
883
884 wr32(E1000_CTRL, ctrl);
885
886 out:
887 return ret_val;
888 }
889
890 /**
891 * igb_config_fc_after_link_up - Configures flow control after link
892 * @hw: pointer to the HW structure
893 *
894 * Checks the status of auto-negotiation after link up to ensure that the
895 * speed and duplex were not forced. If the link needed to be forced, then
896 * flow control needs to be forced also. If auto-negotiation is enabled
897 * and did not fail, then we configure flow control based on our link
898 * partner.
899 **/
900 s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
901 {
902 struct e1000_mac_info *mac = &hw->mac;
903 s32 ret_val = 0;
904 u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
905 u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
906 u16 speed, duplex;
907
908 /* Check for the case where we have fiber media and auto-neg failed
909 * so we had to force link. In this case, we need to force the
910 * configuration of the MAC to match the "fc" parameter.
911 */
912 if (mac->autoneg_failed) {
913 if (hw->phy.media_type == e1000_media_type_internal_serdes)
914 ret_val = igb_force_mac_fc(hw);
915 } else {
916 if (hw->phy.media_type == e1000_media_type_copper)
917 ret_val = igb_force_mac_fc(hw);
918 }
919
920 if (ret_val) {
921 hw_dbg("Error forcing flow control settings\n");
922 goto out;
923 }
924
925 /* Check for the case where we have copper media and auto-neg is
926 * enabled. In this case, we need to check and see if Auto-Neg
927 * has completed, and if so, how the PHY and link partner have
928 * flow control configured.
929 */
930 if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
931 /* Read the MII Status Register and check to see if AutoNeg
932 * has completed. We read this twice because this reg has
933 * some "sticky" (latched) bits.
934 */
935 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
936 &mii_status_reg);
937 if (ret_val)
938 goto out;
939 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
940 &mii_status_reg);
941 if (ret_val)
942 goto out;
943
944 if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
945 hw_dbg("Copper PHY and Auto Neg has not completed.\n");
946 goto out;
947 }
948
949 /* The AutoNeg process has completed, so we now need to
950 * read both the Auto Negotiation Advertisement
951 * Register (Address 4) and the Auto_Negotiation Base
952 * Page Ability Register (Address 5) to determine how
953 * flow control was negotiated.
954 */
955 ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
956 &mii_nway_adv_reg);
957 if (ret_val)
958 goto out;
959 ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
960 &mii_nway_lp_ability_reg);
961 if (ret_val)
962 goto out;
963
964 /* Two bits in the Auto Negotiation Advertisement Register
965 * (Address 4) and two bits in the Auto Negotiation Base
966 * Page Ability Register (Address 5) determine flow control
967 * for both the PHY and the link partner. The following
968 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
969 * 1999, describes these PAUSE resolution bits and how flow
970 * control is determined based upon these settings.
971 * NOTE: DC = Don't Care
972 *
973 * LOCAL DEVICE | LINK PARTNER
974 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
975 *-------|---------|-------|---------|--------------------
976 * 0 | 0 | DC | DC | e1000_fc_none
977 * 0 | 1 | 0 | DC | e1000_fc_none
978 * 0 | 1 | 1 | 0 | e1000_fc_none
979 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
980 * 1 | 0 | 0 | DC | e1000_fc_none
981 * 1 | DC | 1 | DC | e1000_fc_full
982 * 1 | 1 | 0 | 0 | e1000_fc_none
983 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
984 *
985 * Are both PAUSE bits set to 1? If so, this implies
986 * Symmetric Flow Control is enabled at both ends. The
987 * ASM_DIR bits are irrelevant per the spec.
988 *
989 * For Symmetric Flow Control:
990 *
991 * LOCAL DEVICE | LINK PARTNER
992 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
993 *-------|---------|-------|---------|--------------------
994 * 1 | DC | 1 | DC | e1000_fc_full
995 *
996 */
997 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
998 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
999 /* Now we need to check if the user selected RX ONLY
1000 * of pause frames. In this case, we had to advertise
1001 * FULL flow control because we could not advertise RX
1002 * ONLY. Hence, we must now check to see if we need to
1003 * turn OFF the TRANSMISSION of PAUSE frames.
1004 */
1005 if (hw->fc.requested_mode == e1000_fc_full) {
1006 hw->fc.current_mode = e1000_fc_full;
1007 hw_dbg("Flow Control = FULL.\n");
1008 } else {
1009 hw->fc.current_mode = e1000_fc_rx_pause;
1010 hw_dbg("Flow Control = RX PAUSE frames only.\n");
1011 }
1012 }
1013 /* For receiving PAUSE frames ONLY.
1014 *
1015 * LOCAL DEVICE | LINK PARTNER
1016 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1017 *-------|---------|-------|---------|--------------------
1018 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1019 */
1020 else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1021 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
1022 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
1023 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
1024 hw->fc.current_mode = e1000_fc_tx_pause;
1025 hw_dbg("Flow Control = TX PAUSE frames only.\n");
1026 }
1027 /* For transmitting PAUSE frames ONLY.
1028 *
1029 * LOCAL DEVICE | LINK PARTNER
1030 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1031 *-------|---------|-------|---------|--------------------
1032 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1033 */
1034 else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1035 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
1036 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
1037 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
1038 hw->fc.current_mode = e1000_fc_rx_pause;
1039 hw_dbg("Flow Control = RX PAUSE frames only.\n");
1040 }
1041 /* Per the IEEE spec, at this point flow control should be
1042 * disabled. However, we want to consider that we could
1043 * be connected to a legacy switch that doesn't advertise
1044 * desired flow control, but can be forced on the link
1045 * partner. So if we advertised no flow control, that is
1046 * what we will resolve to. If we advertised some kind of
1047 * receive capability (Rx Pause Only or Full Flow Control)
1048 * and the link partner advertised none, we will configure
1049 * ourselves to enable Rx Flow Control only. We can do
1050 * this safely for two reasons: If the link partner really
1051 * didn't want flow control enabled, and we enable Rx, no
1052 * harm done since we won't be receiving any PAUSE frames
1053 * anyway. If the intent on the link partner was to have
1054 * flow control enabled, then by us enabling RX only, we
1055 * can at least receive pause frames and process them.
1056 * This is a good idea because in most cases, since we are
1057 * predominantly a server NIC, more times than not we will
1058 * be asked to delay transmission of packets than asking
1059 * our link partner to pause transmission of frames.
1060 */
1061 else if ((hw->fc.requested_mode == e1000_fc_none) ||
1062 (hw->fc.requested_mode == e1000_fc_tx_pause) ||
1063 (hw->fc.strict_ieee)) {
1064 hw->fc.current_mode = e1000_fc_none;
1065 hw_dbg("Flow Control = NONE.\n");
1066 } else {
1067 hw->fc.current_mode = e1000_fc_rx_pause;
1068 hw_dbg("Flow Control = RX PAUSE frames only.\n");
1069 }
1070
1071 /* Now we need to do one last check... If we auto-
1072 * negotiated to HALF DUPLEX, flow control should not be
1073 * enabled per IEEE 802.3 spec.
1074 */
1075 ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
1076 if (ret_val) {
1077 hw_dbg("Error getting link speed and duplex\n");
1078 goto out;
1079 }
1080
1081 if (duplex == HALF_DUPLEX)
1082 hw->fc.current_mode = e1000_fc_none;
1083
1084 /* Now we call a subroutine to actually force the MAC
1085 * controller to use the correct flow control settings.
1086 */
1087 ret_val = igb_force_mac_fc(hw);
1088 if (ret_val) {
1089 hw_dbg("Error forcing flow control settings\n");
1090 goto out;
1091 }
1092 }
1093 /* Check for the case where we have SerDes media and auto-neg is
1094 * enabled. In this case, we need to check and see if Auto-Neg
1095 * has completed, and if so, how the PHY and link partner have
1096 * flow control configured.
1097 */
1098 if ((hw->phy.media_type == e1000_media_type_internal_serdes)
1099 && mac->autoneg) {
1100 /* Read the PCS_LSTS and check to see if AutoNeg
1101 * has completed.
1102 */
1103 pcs_status_reg = rd32(E1000_PCS_LSTAT);
1104
1105 if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
1106 hw_dbg("PCS Auto Neg has not completed.\n");
1107 return ret_val;
1108 }
1109
1110 /* The AutoNeg process has completed, so we now need to
1111 * read both the Auto Negotiation Advertisement
1112 * Register (PCS_ANADV) and the Auto_Negotiation Base
1113 * Page Ability Register (PCS_LPAB) to determine how
1114 * flow control was negotiated.
1115 */
1116 pcs_adv_reg = rd32(E1000_PCS_ANADV);
1117 pcs_lp_ability_reg = rd32(E1000_PCS_LPAB);
1118
1119 /* Two bits in the Auto Negotiation Advertisement Register
1120 * (PCS_ANADV) and two bits in the Auto Negotiation Base
1121 * Page Ability Register (PCS_LPAB) determine flow control
1122 * for both the PHY and the link partner. The following
1123 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
1124 * 1999, describes these PAUSE resolution bits and how flow
1125 * control is determined based upon these settings.
1126 * NOTE: DC = Don't Care
1127 *
1128 * LOCAL DEVICE | LINK PARTNER
1129 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
1130 *-------|---------|-------|---------|--------------------
1131 * 0 | 0 | DC | DC | e1000_fc_none
1132 * 0 | 1 | 0 | DC | e1000_fc_none
1133 * 0 | 1 | 1 | 0 | e1000_fc_none
1134 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1135 * 1 | 0 | 0 | DC | e1000_fc_none
1136 * 1 | DC | 1 | DC | e1000_fc_full
1137 * 1 | 1 | 0 | 0 | e1000_fc_none
1138 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1139 *
1140 * Are both PAUSE bits set to 1? If so, this implies
1141 * Symmetric Flow Control is enabled at both ends. The
1142 * ASM_DIR bits are irrelevant per the spec.
1143 *
1144 * For Symmetric Flow Control:
1145 *
1146 * LOCAL DEVICE | LINK PARTNER
1147 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1148 *-------|---------|-------|---------|--------------------
1149 * 1 | DC | 1 | DC | e1000_fc_full
1150 *
1151 */
1152 if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
1153 (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
1154 /* Now we need to check if the user selected Rx ONLY
1155 * of pause frames. In this case, we had to advertise
1156 * FULL flow control because we could not advertise Rx
1157 * ONLY. Hence, we must now check to see if we need to
1158 * turn OFF the TRANSMISSION of PAUSE frames.
1159 */
1160 if (hw->fc.requested_mode == e1000_fc_full) {
1161 hw->fc.current_mode = e1000_fc_full;
1162 hw_dbg("Flow Control = FULL.\n");
1163 } else {
1164 hw->fc.current_mode = e1000_fc_rx_pause;
1165 hw_dbg("Flow Control = Rx PAUSE frames only.\n");
1166 }
1167 }
1168 /* For receiving PAUSE frames ONLY.
1169 *
1170 * LOCAL DEVICE | LINK PARTNER
1171 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1172 *-------|---------|-------|---------|--------------------
1173 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1174 */
1175 else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
1176 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
1177 (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
1178 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
1179 hw->fc.current_mode = e1000_fc_tx_pause;
1180 hw_dbg("Flow Control = Tx PAUSE frames only.\n");
1181 }
1182 /* For transmitting PAUSE frames ONLY.
1183 *
1184 * LOCAL DEVICE | LINK PARTNER
1185 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1186 *-------|---------|-------|---------|--------------------
1187 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1188 */
1189 else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
1190 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
1191 !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
1192 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
1193 hw->fc.current_mode = e1000_fc_rx_pause;
1194 hw_dbg("Flow Control = Rx PAUSE frames only.\n");
1195 } else {
1196 /* Per the IEEE spec, at this point flow control
1197 * should be disabled.
1198 */
1199 hw->fc.current_mode = e1000_fc_none;
1200 hw_dbg("Flow Control = NONE.\n");
1201 }
1202
1203 /* Now we call a subroutine to actually force the MAC
1204 * controller to use the correct flow control settings.
1205 */
1206 pcs_ctrl_reg = rd32(E1000_PCS_LCTL);
1207 pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
1208 wr32(E1000_PCS_LCTL, pcs_ctrl_reg);
1209
1210 ret_val = igb_force_mac_fc(hw);
1211 if (ret_val) {
1212 hw_dbg("Error forcing flow control settings\n");
1213 return ret_val;
1214 }
1215 }
1216
1217 out:
1218 return ret_val;
1219 }
1220
1221 /**
1222 * igb_get_speed_and_duplex_copper - Retrieve current speed/duplex
1223 * @hw: pointer to the HW structure
1224 * @speed: stores the current speed
1225 * @duplex: stores the current duplex
1226 *
1227 * Read the status register for the current speed/duplex and store the current
1228 * speed and duplex for copper connections.
1229 **/
1230 s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
1231 u16 *duplex)
1232 {
1233 u32 status;
1234
1235 status = rd32(E1000_STATUS);
1236 if (status & E1000_STATUS_SPEED_1000) {
1237 *speed = SPEED_1000;
1238 hw_dbg("1000 Mbs, ");
1239 } else if (status & E1000_STATUS_SPEED_100) {
1240 *speed = SPEED_100;
1241 hw_dbg("100 Mbs, ");
1242 } else {
1243 *speed = SPEED_10;
1244 hw_dbg("10 Mbs, ");
1245 }
1246
1247 if (status & E1000_STATUS_FD) {
1248 *duplex = FULL_DUPLEX;
1249 hw_dbg("Full Duplex\n");
1250 } else {
1251 *duplex = HALF_DUPLEX;
1252 hw_dbg("Half Duplex\n");
1253 }
1254
1255 return 0;
1256 }
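/* Usage sketch (illustrative): callers typically go through the MAC ops
 * table rather than calling this helper directly, e.g.
 *
 *	u16 speed, duplex;
 *
 *	if (!hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex))
 *		hw_dbg("link is %u Mbps, %s duplex\n", speed,
 *		       duplex == FULL_DUPLEX ? "full" : "half");
 */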
1257
1258 /**
1259 * igb_get_hw_semaphore - Acquire hardware semaphore
1260 * @hw: pointer to the HW structure
1261 *
1262 * Acquire the HW semaphore to access the PHY or NVM
1263 **/
1264 s32 igb_get_hw_semaphore(struct e1000_hw *hw)
1265 {
1266 u32 swsm;
1267 s32 ret_val = 0;
1268 s32 timeout = hw->nvm.word_size + 1;
1269 s32 i = 0;
1270
1271 /* Get the SW semaphore */
1272 while (i < timeout) {
1273 swsm = rd32(E1000_SWSM);
1274 if (!(swsm & E1000_SWSM_SMBI))
1275 break;
1276
1277 udelay(50);
1278 i++;
1279 }
1280
1281 if (i == timeout) {
1282 hw_dbg("Driver can't access device - SMBI bit is set.\n");
1283 ret_val = -E1000_ERR_NVM;
1284 goto out;
1285 }
1286
1287 /* Get the FW semaphore. */
1288 for (i = 0; i < timeout; i++) {
1289 swsm = rd32(E1000_SWSM);
1290 wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
1291
1292 /* Semaphore acquired if bit latched */
1293 if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
1294 break;
1295
1296 udelay(50);
1297 }
1298
1299 if (i == timeout) {
1300 /* Release semaphores */
1301 igb_put_hw_semaphore(hw);
1302 hw_dbg("Driver can't access the NVM\n");
1303 ret_val = -E1000_ERR_NVM;
1304 goto out;
1305 }
1306
1307 out:
1308 return ret_val;
1309 }
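/* Usage sketch (illustrative): the semaphore brackets NVM/PHY accesses and
 * must always be released afterwards, e.g.
 *
 *	if (igb_get_hw_semaphore(hw))
 *		return -E1000_ERR_NVM;
 *	... access the shared NVM/PHY resource ...
 *	igb_put_hw_semaphore(hw);
 */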
1310
1311 /**
1312 * igb_put_hw_semaphore - Release hardware semaphore
1313 * @hw: pointer to the HW structure
1314 *
1315 * Release hardware semaphore used to access the PHY or NVM
1316 **/
1317 void igb_put_hw_semaphore(struct e1000_hw *hw)
1318 {
1319 u32 swsm;
1320
1321 swsm = rd32(E1000_SWSM);
1322
1323 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
1324
1325 wr32(E1000_SWSM, swsm);
1326 }
1327
1328 /**
1329 * igb_get_auto_rd_done - Check for auto read completion
1330 * @hw: pointer to the HW structure
1331 *
1332 * Check EEPROM for Auto Read done bit.
1333 **/
1334 s32 igb_get_auto_rd_done(struct e1000_hw *hw)
1335 {
1336 s32 i = 0;
1337 s32 ret_val = 0;
1338
1339
1340 while (i < AUTO_READ_DONE_TIMEOUT) {
1341 if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
1342 break;
1343 usleep_range(1000, 2000);
1344 i++;
1345 }
1346
1347 if (i == AUTO_READ_DONE_TIMEOUT) {
1348 hw_dbg("Auto read by HW from NVM has not completed.\n");
1349 ret_val = -E1000_ERR_RESET;
1350 goto out;
1351 }
1352
1353 out:
1354 return ret_val;
1355 }
1356
1357 /**
1358 * igb_valid_led_default - Verify a valid default LED config
1359 * @hw: pointer to the HW structure
1360 * @data: pointer to the NVM (EEPROM)
1361 *
1362 * Read the EEPROM for the current default LED configuration. If the
1363 * LED configuration is not valid, set to a valid LED configuration.
1364 **/
1365 static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
1366 {
1367 s32 ret_val;
1368
1369 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
1370 if (ret_val) {
1371 hw_dbg("NVM Read Error\n");
1372 goto out;
1373 }
1374
1375 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
1376 switch (hw->phy.media_type) {
1377 case e1000_media_type_internal_serdes:
1378 *data = ID_LED_DEFAULT_82575_SERDES;
1379 break;
1380 case e1000_media_type_copper:
1381 default:
1382 *data = ID_LED_DEFAULT;
1383 break;
1384 }
1385 }
1386 out:
1387 return ret_val;
1388 }
1389
1390 /**
1391 * igb_id_led_init - Initialize LED identification settings
1392 * @hw: pointer to the HW structure
1393 *
1394 **/
1395 s32 igb_id_led_init(struct e1000_hw *hw)
1396 {
1397 struct e1000_mac_info *mac = &hw->mac;
1398 s32 ret_val;
1399 const u32 ledctl_mask = 0x000000FF;
1400 const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
1401 const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
1402 u16 data, i, temp;
1403 const u16 led_mask = 0x0F;
1404
1405 /* i210 and i211 devices have different LED mechanism */
1406 if ((hw->mac.type == e1000_i210) ||
1407 (hw->mac.type == e1000_i211))
1408 ret_val = igb_valid_led_default_i210(hw, &data);
1409 else
1410 ret_val = igb_valid_led_default(hw, &data);
1411
1412 if (ret_val)
1413 goto out;
1414
1415 mac->ledctl_default = rd32(E1000_LEDCTL);
1416 mac->ledctl_mode1 = mac->ledctl_default;
1417 mac->ledctl_mode2 = mac->ledctl_default;
1418
1419 for (i = 0; i < 4; i++) {
1420 temp = (data >> (i << 2)) & led_mask;
1421 switch (temp) {
1422 case ID_LED_ON1_DEF2:
1423 case ID_LED_ON1_ON2:
1424 case ID_LED_ON1_OFF2:
1425 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1426 mac->ledctl_mode1 |= ledctl_on << (i << 3);
1427 break;
1428 case ID_LED_OFF1_DEF2:
1429 case ID_LED_OFF1_ON2:
1430 case ID_LED_OFF1_OFF2:
1431 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1432 mac->ledctl_mode1 |= ledctl_off << (i << 3);
1433 break;
1434 default:
1435 /* Do nothing */
1436 break;
1437 }
1438 switch (temp) {
1439 case ID_LED_DEF1_ON2:
1440 case ID_LED_ON1_ON2:
1441 case ID_LED_OFF1_ON2:
1442 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1443 mac->ledctl_mode2 |= ledctl_on << (i << 3);
1444 break;
1445 case ID_LED_DEF1_OFF2:
1446 case ID_LED_ON1_OFF2:
1447 case ID_LED_OFF1_OFF2:
1448 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1449 mac->ledctl_mode2 |= ledctl_off << (i << 3);
1450 break;
1451 default:
1452 /* Do nothing */
1453 break;
1454 }
1455 }
1456
1457 out:
1458 return ret_val;
1459 }
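/* Layout note (illustrative): each 4-bit field of the NVM word selects the
 * identify behaviour of one LED, and field i maps onto byte i of LEDCTL
 * (the "i << 3" shifts above).  For example LED2's mode occupies LEDCTL bits
 * 23:16 and is forced to LED_ON or LED_OFF in ledctl_mode1/ledctl_mode2
 * according to the decoded field.
 */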
1460
1461 /**
1462 * igb_cleanup_led - Set LED config to default operation
1463 * @hw: pointer to the HW structure
1464 *
1465 * Remove the current LED configuration and set the LED configuration
1466 * to the default value, saved from the EEPROM.
1467 **/
1468 s32 igb_cleanup_led(struct e1000_hw *hw)
1469 {
1470 wr32(E1000_LEDCTL, hw->mac.ledctl_default);
1471 return 0;
1472 }
1473
1474 /**
1475 * igb_blink_led - Blink LED
1476 * @hw: pointer to the HW structure
1477 *
1478 * Blink the LEDs which are set to be on.
1479 **/
1480 s32 igb_blink_led(struct e1000_hw *hw)
1481 {
1482 u32 ledctl_blink = 0;
1483 u32 i;
1484
1485 if (hw->phy.media_type == e1000_media_type_fiber) {
1486 /* always blink LED0 for PCI-E fiber */
1487 ledctl_blink = E1000_LEDCTL_LED0_BLINK |
1488 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
1489 } else {
1490 /* Set the blink bit for each LED that's "on" (0x0E)
1491 * (or "off" if inverted) in ledctl_mode2. The blink
1492 * logic in hardware only works when mode is set to "on"
1493 * so it must be changed accordingly when the mode is
1494 * "off" and inverted.
1495 */
1496 ledctl_blink = hw->mac.ledctl_mode2;
1497 for (i = 0; i < 32; i += 8) {
1498 u32 mode = (hw->mac.ledctl_mode2 >> i) &
1499 E1000_LEDCTL_LED0_MODE_MASK;
1500 u32 led_default = hw->mac.ledctl_default >> i;
1501
1502 if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
1503 (mode == E1000_LEDCTL_MODE_LED_ON)) ||
1504 ((led_default & E1000_LEDCTL_LED0_IVRT) &&
1505 (mode == E1000_LEDCTL_MODE_LED_OFF))) {
1506 ledctl_blink &=
1507 ~(E1000_LEDCTL_LED0_MODE_MASK << i);
1508 ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
1509 E1000_LEDCTL_MODE_LED_ON) << i;
1510 }
1511 }
1512 }
1513
1514 wr32(E1000_LEDCTL, ledctl_blink);
1515
1516 return 0;
1517 }
1518
1519 /**
1520 * igb_led_off - Turn LED off
1521 * @hw: pointer to the HW structure
1522 *
1523 * Turn LED off.
1524 **/
1525 s32 igb_led_off(struct e1000_hw *hw)
1526 {
1527 switch (hw->phy.media_type) {
1528 case e1000_media_type_copper:
1529 wr32(E1000_LEDCTL, hw->mac.ledctl_mode1);
1530 break;
1531 default:
1532 break;
1533 }
1534
1535 return 0;
1536 }
1537
1538 /**
1539 * igb_disable_pcie_master - Disables PCI-express master access
1540 * @hw: pointer to the HW structure
1541 *
1542 * Returns 0 if successful, else returns -10
1543 * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
1544 * the master requests to be disabled.
1545 *
1546 * Disables PCI-Express master access and verifies there are no pending
1547 * requests.
1548 **/
1549 s32 igb_disable_pcie_master(struct e1000_hw *hw)
1550 {
1551 u32 ctrl;
1552 s32 timeout = MASTER_DISABLE_TIMEOUT;
1553 s32 ret_val = 0;
1554
1555 if (hw->bus.type != e1000_bus_type_pci_express)
1556 goto out;
1557
1558 ctrl = rd32(E1000_CTRL);
1559 ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
1560 wr32(E1000_CTRL, ctrl);
1561
1562 while (timeout) {
1563 if (!(rd32(E1000_STATUS) &
1564 E1000_STATUS_GIO_MASTER_ENABLE))
1565 break;
1566 udelay(100);
1567 timeout--;
1568 }
1569
1570 if (!timeout) {
1571 hw_dbg("Master requests are pending.\n");
1572 ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
1573 goto out;
1574 }
1575
1576 out:
1577 return ret_val;
1578 }
1579
1580 /**
1581 * igb_validate_mdi_setting - Verify MDI/MDIx settings
1582 * @hw: pointer to the HW structure
1583 *
1584 * Verify that when not using auto-negotiation, MDI/MDIx is correctly
1585 * set, which is forced to MDI mode only.
1586 **/
1587 s32 igb_validate_mdi_setting(struct e1000_hw *hw)
1588 {
1589 s32 ret_val = 0;
1590
1591 /* All MDI settings are supported on 82580 and newer. */
1592 if (hw->mac.type >= e1000_82580)
1593 goto out;
1594
1595 if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
1596 hw_dbg("Invalid MDI setting detected\n");
1597 hw->phy.mdix = 1;
1598 ret_val = -E1000_ERR_CONFIG;
1599 goto out;
1600 }
1601
1602 out:
1603 return ret_val;
1604 }
1605
1606 /**
1607 * igb_write_8bit_ctrl_reg - Write an 8bit CTRL register
1608 * @hw: pointer to the HW structure
1609 * @reg: 32bit register offset such as E1000_SCTL
1610 * @offset: register offset to write to
1611 * @data: data to write at register offset
1612 *
1613 * Writes an address/data control type register. There are several of these
1614 * and they all have the format address << 8 | data and bit 31 is polled for
1615 * completion.
1616 **/
1617 s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
1618 u32 offset, u8 data)
1619 {
1620 u32 i, regvalue = 0;
1621 s32 ret_val = 0;
1622
1623 /* Set up the address and data */
1624 regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
1625 wr32(reg, regvalue);
1626
1627 /* Poll the ready bit to see if the write completed */
1628 for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
1629 udelay(5);
1630 regvalue = rd32(reg);
1631 if (regvalue & E1000_GEN_CTL_READY)
1632 break;
1633 }
1634 if (!(regvalue & E1000_GEN_CTL_READY)) {
1635 hw_dbg("Reg %08x did not indicate ready\n", reg);
1636 ret_val = -E1000_ERR_PHY;
1637 goto out;
1638 }
1639
1640 out:
1641 return ret_val;
1642 }
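/* Worked example (illustrative): with the "address << 8 | data" layout noted
 * above, igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x03, 0x5A) writes the value
 * 0x0000035A to E1000_SCTL and then polls E1000_GEN_CTL_READY (bit 31) until
 * the hardware reports completion or E1000_GEN_POLL_TIMEOUT iterations pass.
 */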
1643
1644 /**
1645 * igb_enable_mng_pass_thru - Enable processing of ARPs
1646 * @hw: pointer to the HW structure
1647 *
1648 * Verifies the hardware needs to leave the interface enabled so that frames can
1649 * be directed to and from the management interface.
1650 **/
1651 bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
1652 {
1653 u32 manc;
1654 u32 fwsm, factps;
1655 bool ret_val = false;
1656
1657 if (!hw->mac.asf_firmware_present)
1658 goto out;
1659
1660 manc = rd32(E1000_MANC);
1661
1662 if (!(manc & E1000_MANC_RCV_TCO_EN))
1663 goto out;
1664
1665 if (hw->mac.arc_subsystem_valid) {
1666 fwsm = rd32(E1000_FWSM);
1667 factps = rd32(E1000_FACTPS);
1668
1669 if (!(factps & E1000_FACTPS_MNGCG) &&
1670 ((fwsm & E1000_FWSM_MODE_MASK) ==
1671 (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
1672 ret_val = true;
1673 goto out;
1674 }
1675 } else {
1676 if ((manc & E1000_MANC_SMBUS_EN) &&
1677 !(manc & E1000_MANC_ASF_EN)) {
1678 ret_val = true;
1679 goto out;
1680 }
1681 }
1682
1683 out:
1684 return ret_val;
1685 }
1686