// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <net/mac80211.h>

#include "common.h"

int
_il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout)
{
	const int interval = 10; /* microseconds */
	int t = 0;

	do {
		if ((_il_rd(il, addr) & mask) == (bits & mask))
			return t;
		udelay(interval);
		t += interval;
	} while (t < timeout);

	return -ETIMEDOUT;
}
EXPORT_SYMBOL(_il_poll_bit);
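
/*
 * Usage sketch (illustrative only; these exact arguments are not used
 * anywhere in this file): poll up to 5000 usec for the MAC clock ready
 * bit, treating a negative return as a timeout:
 *
 *	int t = _il_poll_bit(il, CSR_GP_CNTRL,
 *			     CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
 *			     CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 5000);
 *	if (t < 0)
 *		IL_ERR("MAC clock not ready\n");
 *
 * On success the return value is the number of microseconds spent
 * polling before the masked bits matched.
 */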

void
il_set_bit(struct il_priv *p, u32 r, u32 m)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&p->reg_lock, reg_flags);
	_il_set_bit(p, r, m);
	spin_unlock_irqrestore(&p->reg_lock, reg_flags);
}
EXPORT_SYMBOL(il_set_bit);

void
il_clear_bit(struct il_priv *p, u32 r, u32 m)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&p->reg_lock, reg_flags);
	_il_clear_bit(p, r, m);
	spin_unlock_irqrestore(&p->reg_lock, reg_flags);
}
EXPORT_SYMBOL(il_clear_bit);

bool
_il_grab_nic_access(struct il_priv *il)
{
	int ret;
	u32 val;

	/* this bit wakes up the NIC */
	_il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * These bits say the device is running, and should keep running for
	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
	 * but they do not indicate that embedded SRAM is restored yet;
	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
	 * to/from host DRAM when sleeping/waking for power-saving.
	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
	 * series of register accesses is expected (e.g. reading Event Log),
	 * to keep device from sleeping.
	 *
	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
	 * SRAM is okay/restored.  We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
	 */
	ret =
	    _il_poll_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			  CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (unlikely(ret < 0)) {
		val = _il_rd(il, CSR_GP_CNTRL);
		WARN_ONCE(1, "Timeout waiting for ucode processor access "
			     "(CSR_GP_CNTRL 0x%08x)\n", val);
		_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(_il_grab_nic_access);
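
/*
 * The helpers below show the intended call pattern for NIC access: take
 * reg_lock, grab access, perform the I/O, release access, unlock.  A
 * minimal sketch of that pattern:
 *
 *	spin_lock_irqsave(&il->reg_lock, reg_flags);
 *	if (likely(_il_grab_nic_access(il))) {
 *		... register reads/writes ...
 *		_il_release_nic_access(il);
 *	}
 *	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
 */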

int
il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout)
{
	const int interval = 10; /* microseconds */
	int t = 0;

	do {
		if ((il_rd(il, addr) & mask) == mask)
			return t;
		udelay(interval);
		t += interval;
	} while (t < timeout);

	return -ETIMEDOUT;
}
EXPORT_SYMBOL(il_poll_bit);

u32
il_rd_prph(struct il_priv *il, u32 reg)
{
	unsigned long reg_flags;
	u32 val;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	_il_grab_nic_access(il);
	val = _il_rd_prph(il, reg);
	_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
	return val;
}
EXPORT_SYMBOL(il_rd_prph);

void
il_wr_prph(struct il_priv *il, u32 addr, u32 val)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	if (likely(_il_grab_nic_access(il))) {
		_il_wr_prph(il, addr, val);
		_il_release_nic_access(il);
	}
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
}
EXPORT_SYMBOL(il_wr_prph);

u32
il_read_targ_mem(struct il_priv *il, u32 addr)
{
	unsigned long reg_flags;
	u32 value;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	_il_grab_nic_access(il);

	_il_wr(il, HBUS_TARG_MEM_RADDR, addr);
	value = _il_rd(il, HBUS_TARG_MEM_RDAT);

	_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
	return value;
}
EXPORT_SYMBOL(il_read_targ_mem);

void
il_write_targ_mem(struct il_priv *il, u32 addr, u32 val)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	if (likely(_il_grab_nic_access(il))) {
		_il_wr(il, HBUS_TARG_MEM_WADDR, addr);
		_il_wr(il, HBUS_TARG_MEM_WDAT, val);
		_il_release_nic_access(il);
	}
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
}
EXPORT_SYMBOL(il_write_targ_mem);

const char *
il_get_cmd_string(u8 cmd)
{
	switch (cmd) {
		IL_CMD(N_ALIVE);
		IL_CMD(N_ERROR);
		IL_CMD(C_RXON);
		IL_CMD(C_RXON_ASSOC);
		IL_CMD(C_QOS_PARAM);
		IL_CMD(C_RXON_TIMING);
		IL_CMD(C_ADD_STA);
		IL_CMD(C_REM_STA);
		IL_CMD(C_WEPKEY);
		IL_CMD(N_3945_RX);
		IL_CMD(C_TX);
		IL_CMD(C_RATE_SCALE);
		IL_CMD(C_LEDS);
		IL_CMD(C_TX_LINK_QUALITY_CMD);
		IL_CMD(C_CHANNEL_SWITCH);
		IL_CMD(N_CHANNEL_SWITCH);
		IL_CMD(C_SPECTRUM_MEASUREMENT);
		IL_CMD(N_SPECTRUM_MEASUREMENT);
		IL_CMD(C_POWER_TBL);
		IL_CMD(N_PM_SLEEP);
		IL_CMD(N_PM_DEBUG_STATS);
		IL_CMD(C_SCAN);
		IL_CMD(C_SCAN_ABORT);
		IL_CMD(N_SCAN_START);
		IL_CMD(N_SCAN_RESULTS);
		IL_CMD(N_SCAN_COMPLETE);
		IL_CMD(N_BEACON);
		IL_CMD(C_TX_BEACON);
		IL_CMD(C_TX_PWR_TBL);
		IL_CMD(C_BT_CONFIG);
		IL_CMD(C_STATS);
		IL_CMD(N_STATS);
		IL_CMD(N_CARD_STATE);
		IL_CMD(N_MISSED_BEACONS);
		IL_CMD(C_CT_KILL_CONFIG);
		IL_CMD(C_SENSITIVITY);
		IL_CMD(C_PHY_CALIBRATION);
		IL_CMD(N_RX_PHY);
		IL_CMD(N_RX_MPDU);
		IL_CMD(N_RX);
		IL_CMD(N_COMPRESSED_BA);
	default:
		return "UNKNOWN";
	}
}
EXPORT_SYMBOL(il_get_cmd_string);
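
/*
 * The IL_CMD() entries above come from a stringification helper in
 * common.h; it presumably expands along the lines of
 *
 *	#define IL_CMD(x) case x: return #x
 *
 * so that, e.g., IL_CMD(C_SCAN) becomes: case C_SCAN: return "C_SCAN";
 */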

#define HOST_COMPLETE_TIMEOUT (HZ / 2)

static void
il_generic_cmd_callback(struct il_priv *il, struct il_device_cmd *cmd,
			struct il_rx_pkt *pkt)
{
	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from %s (0x%08X)\n",
		       il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		return;
	}
#ifdef CONFIG_IWLEGACY_DEBUG
	switch (cmd->hdr.cmd) {
	case C_TX_LINK_QUALITY_CMD:
	case C_SENSITIVITY:
		D_HC_DUMP("back from %s (0x%08X)\n",
			  il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		break;
	default:
		D_HC("back from %s (0x%08X)\n", il_get_cmd_string(cmd->hdr.cmd),
		     pkt->hdr.flags);
	}
#endif
}

static int
il_send_cmd_async(struct il_priv *il, struct il_host_cmd *cmd)
{
	int ret;

	BUG_ON(!(cmd->flags & CMD_ASYNC));

	/* An asynchronous command cannot expect an SKB to be set. */
	BUG_ON(cmd->flags & CMD_WANT_SKB);

	/* Assign a generic callback if one is not provided */
	if (!cmd->callback)
		cmd->callback = il_generic_cmd_callback;

	if (test_bit(S_EXIT_PENDING, &il->status))
		return -EBUSY;

	ret = il_enqueue_hcmd(il, cmd);
	if (ret < 0) {
		IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
		       il_get_cmd_string(cmd->id), ret);
		return ret;
	}
	return 0;
}

int
il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd)
{
	int cmd_idx;
	int ret;

	lockdep_assert_held(&il->mutex);

	BUG_ON(cmd->flags & CMD_ASYNC);

	/* A synchronous command cannot have a callback set. */
	BUG_ON(cmd->callback);

	D_INFO("Attempting to send sync command %s\n",
	       il_get_cmd_string(cmd->id));

	set_bit(S_HCMD_ACTIVE, &il->status);
	D_INFO("Setting HCMD_ACTIVE for command %s\n",
	       il_get_cmd_string(cmd->id));

	cmd_idx = il_enqueue_hcmd(il, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
		       il_get_cmd_string(cmd->id), ret);
		goto out;
	}

	ret = wait_event_timeout(il->wait_command_queue,
				 !test_bit(S_HCMD_ACTIVE, &il->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(S_HCMD_ACTIVE, &il->status)) {
			IL_ERR("Error sending %s: time out after %dms.\n",
			       il_get_cmd_string(cmd->id),
			       jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			clear_bit(S_HCMD_ACTIVE, &il->status);
			D_INFO("Clearing HCMD_ACTIVE for command %s\n",
			       il_get_cmd_string(cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if (test_bit(S_RFKILL, &il->status)) {
		IL_ERR("Command %s aborted: RF KILL Switch\n",
		       il_get_cmd_string(cmd->id));
		ret = -ECANCELED;
		goto fail;
	}
	if (test_bit(S_FW_ERROR, &il->status)) {
		IL_ERR("Command %s failed: FW Error\n",
		       il_get_cmd_string(cmd->id));
		ret = -EIO;
		goto fail;
	}
	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
		IL_ERR("Error: Response NULL in '%s'\n",
		       il_get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	ret = 0;
	goto out;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB;
	}
fail:
	if (cmd->reply_page) {
		il_free_pages(il, cmd->reply_page);
		cmd->reply_page = 0;
	}
out:
	return ret;
}
EXPORT_SYMBOL(il_send_cmd_sync);
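
/*
 * Sketch of a synchronous command that wants the response packet back
 * (this mirrors il_send_scan_abort() later in this file).  The caller
 * must hold il->mutex and must free the reply page when done:
 *
 *	struct il_host_cmd cmd = {
 *		.id = C_SCAN_ABORT,
 *		.flags = CMD_WANT_SKB,
 *	};
 *
 *	ret = il_send_cmd_sync(il, &cmd);
 *	if (!ret) {
 *		pkt = (struct il_rx_pkt *)cmd.reply_page;
 *		... inspect pkt ...
 *		il_free_pages(il, cmd.reply_page);
 *	}
 */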

int
il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd)
{
	if (cmd->flags & CMD_ASYNC)
		return il_send_cmd_async(il, cmd);

	return il_send_cmd_sync(il, cmd);
}
EXPORT_SYMBOL(il_send_cmd);

int
il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len, const void *data)
{
	struct il_host_cmd cmd = {
		.id = id,
		.len = len,
		.data = data,
	};

	return il_send_cmd_sync(il, &cmd);
}
EXPORT_SYMBOL(il_send_cmd_pdu);

int
il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data,
		      void (*callback) (struct il_priv *il,
					struct il_device_cmd *cmd,
					struct il_rx_pkt *pkt))
{
	struct il_host_cmd cmd = {
		.id = id,
		.len = len,
		.data = data,
	};

	cmd.flags |= CMD_ASYNC;
	cmd.callback = callback;

	return il_send_cmd_async(il, &cmd);
}
EXPORT_SYMBOL(il_send_cmd_pdu_async);

/* default: IL_LED_BLINK(0) using blinking idx table */
static int led_mode;
module_param(led_mode, int, 0444);
MODULE_PARM_DESC(led_mode,
		 "0=system default, 1=On(RF On)/Off(RF Off), 2=blinking");

/* Throughput		OFF time(ms)	ON time (ms)
 *	>300			25		25
 *	>200 to 300		40		40
 *	>100 to 200		55		55
 *	>70 to 100		65		65
 *	>50 to 70		75		75
 *	>20 to 50		85		85
 *	>10 to 20		95		95
 *	>5 to 10		110		110
 *	>1 to 5			130		130
 *	>0 to 1			167		167
 *	<=0					SOLID ON
 */
static const struct ieee80211_tpt_blink il_blink[] = {
	{.throughput = 0,		.blink_time = 334},
	{.throughput = 1 * 1024 - 1,	.blink_time = 260},
	{.throughput = 5 * 1024 - 1,	.blink_time = 220},
	{.throughput = 10 * 1024 - 1,	.blink_time = 190},
	{.throughput = 20 * 1024 - 1,	.blink_time = 170},
	{.throughput = 50 * 1024 - 1,	.blink_time = 150},
	{.throughput = 70 * 1024 - 1,	.blink_time = 130},
	{.throughput = 100 * 1024 - 1,	.blink_time = 110},
	{.throughput = 200 * 1024 - 1,	.blink_time = 80},
	{.throughput = 300 * 1024 - 1,	.blink_time = 50},
};
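
/*
 * Note: blink_time above appears to be the full on+off period in msec,
 * so each phase lasts blink_time / 2 -- e.g. the 50 ms entry (throughput
 * just under 300 * 1024 units) corresponds to the ">300: 25 on / 25 off"
 * row in the table above, and 334 ms corresponds to the 167/167 row.
 */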

/*
 * Adjust led blink rate to compensate for the MAC clock deviation of each
 * type of HW.  Led blink rate analysis showed an average deviation of 0%
 * on 3945 and 5% on 4965 HW.
 * The led on/off time per HW needs to be compensated according to the
 * deviation to achieve the desired led frequency.
 * The calculation is: (100 - averageDeviation) / 100 * blinkTime
 * For code efficiency the calculation is done with integers:
 *     compensation = (100 - averageDeviation) * 64 / 100
 *     NewBlinkTime = (compensation * BlinkTime) / 64
 */
static inline u8
il_blink_compensation(struct il_priv *il, u8 time, u16 compensation)
{
	if (!compensation) {
		IL_ERR("undefined blink compensation: "
		       "use pre-defined blinking time\n");
		return time;
	}

	return (u8) ((time * compensation) >> 6);
}
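
/*
 * Worked example: with the 5% average deviation measured on 4965 HW,
 * compensation = (100 - 5) * 64 / 100 = 60, so a nominal 170 ms blink
 * time becomes (170 * 60) >> 6 = 159 ms, about 94% of nominal (integer
 * math loses a little versus the exact 95%).
 */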

/* Set led pattern command */
static int
il_led_cmd(struct il_priv *il, unsigned long on, unsigned long off)
{
	struct il_led_cmd led_cmd = {
		.id = IL_LED_LINK,
		.interval = IL_DEF_LED_INTRVL
	};
	int ret;

	if (!test_bit(S_READY, &il->status))
		return -EBUSY;

	if (il->blink_on == on && il->blink_off == off)
		return 0;

	if (off == 0) {
		/* led is SOLID_ON */
		on = IL_LED_SOLID;
	}

	D_LED("Led blink time compensation=%u\n",
	      il->cfg->led_compensation);
	led_cmd.on =
	    il_blink_compensation(il, on,
				  il->cfg->led_compensation);
	led_cmd.off =
	    il_blink_compensation(il, off,
				  il->cfg->led_compensation);

	ret = il->ops->send_led_cmd(il, &led_cmd);
	if (!ret) {
		il->blink_on = on;
		il->blink_off = off;
	}
	return ret;
}

static void
il_led_brightness_set(struct led_classdev *led_cdev,
		      enum led_brightness brightness)
{
	struct il_priv *il = container_of(led_cdev, struct il_priv, led);
	unsigned long on = 0;

	if (brightness > 0)
		on = IL_LED_SOLID;

	il_led_cmd(il, on, 0);
}

static int
il_led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
		 unsigned long *delay_off)
{
	struct il_priv *il = container_of(led_cdev, struct il_priv, led);

	return il_led_cmd(il, *delay_on, *delay_off);
}

void
il_leds_init(struct il_priv *il)
{
	int mode = led_mode;
	int ret;

	if (mode == IL_LED_DEFAULT)
		mode = il->cfg->led_mode;

	il->led.name =
	    kasprintf(GFP_KERNEL, "%s-led", wiphy_name(il->hw->wiphy));
	if (!il->led.name)
		return;

	il->led.brightness_set = il_led_brightness_set;
	il->led.blink_set = il_led_blink_set;
	il->led.max_brightness = 1;

	switch (mode) {
	case IL_LED_DEFAULT:
		WARN_ON(1);
		break;
	case IL_LED_BLINK:
		il->led.default_trigger =
		    ieee80211_create_tpt_led_trigger(il->hw,
						     IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
						     il_blink,
						     ARRAY_SIZE(il_blink));
		break;
	case IL_LED_RF_STATE:
		il->led.default_trigger = ieee80211_get_radio_led_name(il->hw);
		break;
	}

	ret = led_classdev_register(&il->pci_dev->dev, &il->led);
	if (ret) {
		kfree(il->led.name);
		return;
	}

	il->led_registered = true;
}
EXPORT_SYMBOL(il_leds_init);

void
il_leds_exit(struct il_priv *il)
{
	if (!il->led_registered)
		return;

	led_classdev_unregister(&il->led);
	kfree(il->led.name);
}
EXPORT_SYMBOL(il_leds_exit);

/************************** EEPROM BANDS ****************************
 *
 * The il_eeprom_band definitions below provide the mapping from the
 * EEPROM contents to the specific channel number supported for each
 * band.
 *
 * For example, il_priv->eeprom.band_3_channels[4] from the band_3
 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
 * The specific geography and calibration information for that channel
 * is contained in the eeprom map itself.
 *
 * During init, we copy the eeprom information and channel map
 * information into il->channel_info_24/52 and il->channel_map_24/52
 *
 * channel_map_24/52 provides the idx in the channel_info array for a
 * given channel.  We have to have two separate maps as the same channel
 * numbers appear in both the 2.4GHz and 5.2GHz spectrum, as seen in
 * band_1 and band_2.
 *
 * A value of 0xff stored in the channel_map indicates that the channel
 * is not supported by the hardware at all.
 *
 * A value of 0xfe in the channel_map indicates that the channel is not
 * valid for Tx with the current hardware.  This means that
 * while the system can tune and receive on a given channel, it may not
 * be able to associate or transmit any frames on that
 * channel.  There is no corresponding channel information for that
 * entry.
 *
 *********************************************************************/

/* 2.4 GHz */
const u8 il_eeprom_band_1[14] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
};

/* 5.2 GHz bands */
static const u8 il_eeprom_band_2[] = {	/* 4915-5080MHz */
	183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
};

static const u8 il_eeprom_band_3[] = {	/* 5170-5320MHz */
	34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
};

static const u8 il_eeprom_band_4[] = {	/* 5500-5700MHz */
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
};

static const u8 il_eeprom_band_5[] = {	/* 5725-5825MHz */
	145, 149, 153, 157, 161, 165
};

static const u8 il_eeprom_band_6[] = {	/* 2.4 ht40 channel */
	1, 2, 3, 4, 5, 6, 7
};

static const u8 il_eeprom_band_7[] = {	/* 5.2 ht40 channel */
	36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
};

/******************************************************************************
 *
 * EEPROM related functions
 *
 *****************************************************************************/

static int
il_eeprom_verify_signature(struct il_priv *il)
{
	u32 gp = _il_rd(il, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
	int ret = 0;

	D_EEPROM("EEPROM signature=0x%08x\n", gp);
	switch (gp) {
	case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
	case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
		break;
	default:
		IL_ERR("bad EEPROM signature, EEPROM_GP=0x%08x\n", gp);
		ret = -ENOENT;
		break;
	}
	return ret;
}

const u8 *
il_eeprom_query_addr(const struct il_priv *il, size_t offset)
{
	BUG_ON(offset >= il->cfg->eeprom_size);
	return &il->eeprom[offset];
}
EXPORT_SYMBOL(il_eeprom_query_addr);

u16
il_eeprom_query16(const struct il_priv *il, size_t offset)
{
	if (!il->eeprom)
		return 0;
	return (u16) il->eeprom[offset] | ((u16) il->eeprom[offset + 1] << 8);
}
EXPORT_SYMBOL(il_eeprom_query16);
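
/*
 * il->eeprom holds little-endian 16-bit words (see il_eeprom_init
 * below), so il_eeprom_query16(il, offset) reassembles the word at a
 * byte offset: eeprom[offset] is the low byte, eeprom[offset + 1] the
 * high byte.
 */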

/*
 * il_eeprom_init - read EEPROM contents
 *
 * Load the EEPROM contents from adapter into il->eeprom
 *
 * NOTE:  This routine uses the non-debug IO access functions.
 */
int
il_eeprom_init(struct il_priv *il)
{
	__le16 *e;
	u32 gp = _il_rd(il, CSR_EEPROM_GP);
	int sz;
	int ret;
	int addr;

	/* allocate eeprom */
	sz = il->cfg->eeprom_size;
	D_EEPROM("NVM size = %d\n", sz);
	il->eeprom = kzalloc(sz, GFP_KERNEL);
	if (!il->eeprom)
		return -ENOMEM;

	e = (__le16 *) il->eeprom;

	il->ops->apm_init(il);

	ret = il_eeprom_verify_signature(il);
	if (ret < 0) {
		IL_ERR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
		ret = -ENOENT;
		goto err;
	}

	/* Make sure driver (instead of uCode) is allowed to read EEPROM */
	ret = il->ops->eeprom_acquire_semaphore(il);
	if (ret < 0) {
		IL_ERR("Failed to acquire EEPROM semaphore.\n");
		ret = -ENOENT;
		goto err;
	}

	/* eeprom is an array of 16bit values */
	for (addr = 0; addr < sz; addr += sizeof(u16)) {
		u32 r;

		_il_wr(il, CSR_EEPROM_REG,
		       CSR_EEPROM_REG_MSK_ADDR & (addr << 1));

		ret =
		    _il_poll_bit(il, CSR_EEPROM_REG,
				 CSR_EEPROM_REG_READ_VALID_MSK,
				 CSR_EEPROM_REG_READ_VALID_MSK,
				 IL_EEPROM_ACCESS_TIMEOUT);
		if (ret < 0) {
			IL_ERR("Time out reading EEPROM[%d]\n", addr);
			goto done;
		}
		r = _il_rd(il, CSR_EEPROM_REG);
		e[addr / 2] = cpu_to_le16(r >> 16);
	}

	D_EEPROM("NVM Type: %s, version: 0x%x\n", "EEPROM",
		 il_eeprom_query16(il, EEPROM_VERSION));

	ret = 0;
done:
	il->ops->eeprom_release_semaphore(il);

err:
	if (ret)
		il_eeprom_free(il);
	/* Reset chip to save power until we load uCode during "up". */
	il_apm_stop(il);
	return ret;
}
EXPORT_SYMBOL(il_eeprom_init);

void
il_eeprom_free(struct il_priv *il)
{
	kfree(il->eeprom);
	il->eeprom = NULL;
}
EXPORT_SYMBOL(il_eeprom_free);

static void
il_init_band_reference(const struct il_priv *il, int eep_band,
		       int *eeprom_ch_count,
		       const struct il_eeprom_channel **eeprom_ch_info,
		       const u8 **eeprom_ch_idx)
{
	u32 offset = il->cfg->regulatory_bands[eep_band - 1];

	switch (eep_band) {
	case 1:		/* 2.4GHz band */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_1);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_1;
		break;
	case 2:		/* 4.9GHz band */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_2);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_2;
		break;
	case 3:		/* 5.2GHz band */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_3);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_3;
		break;
	case 4:		/* 5.5GHz band */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_4);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_4;
		break;
	case 5:		/* 5.7GHz band */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_5);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_5;
		break;
	case 6:		/* 2.4GHz ht40 channels */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_6);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_6;
		break;
	case 7:		/* 5 GHz ht40 channels */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_7);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_7;
		break;
	default:
		BUG();
	}
}

#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
			    ? # x " " : "")
/*
 * il_mod_ht40_chan_info - Copy ht40 channel info into driver's il.
 *
 * Does not set up a command, or touch hardware.
 */
static int
il_mod_ht40_chan_info(struct il_priv *il, enum nl80211_band band, u16 channel,
		      const struct il_eeprom_channel *eeprom_ch,
		      u8 clear_ht40_extension_channel)
{
	struct il_channel_info *ch_info;

	ch_info =
	    (struct il_channel_info *)il_get_channel_info(il, band, channel);

	if (!il_is_channel_valid(ch_info))
		return -1;

	D_EEPROM("HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
		 " Ad-Hoc %ssupported\n", ch_info->channel,
		 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
		 CHECK_AND_PRINT(IBSS), CHECK_AND_PRINT(ACTIVE),
		 CHECK_AND_PRINT(RADAR), CHECK_AND_PRINT(WIDE),
		 CHECK_AND_PRINT(DFS), eeprom_ch->flags,
		 eeprom_ch->max_power_avg,
		 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) &&
		  !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? "" : "not ");

	ch_info->ht40_eeprom = *eeprom_ch;
	ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
	ch_info->ht40_flags = eeprom_ch->flags;
	if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
		ch_info->ht40_extension_channel &=
		    ~clear_ht40_extension_channel;

	return 0;
}

#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
			    ? # x " " : "")

/*
 * il_init_channel_map - Set up driver's info for all possible channels
 */
int
il_init_channel_map(struct il_priv *il)
{
	int eeprom_ch_count = 0;
	const u8 *eeprom_ch_idx = NULL;
	const struct il_eeprom_channel *eeprom_ch_info = NULL;
	int band, ch;
	struct il_channel_info *ch_info;

	if (il->channel_count) {
		D_EEPROM("Channel map already initialized.\n");
		return 0;
	}

	D_EEPROM("Initializing regulatory info from EEPROM\n");

	il->channel_count =
	    ARRAY_SIZE(il_eeprom_band_1) + ARRAY_SIZE(il_eeprom_band_2) +
	    ARRAY_SIZE(il_eeprom_band_3) + ARRAY_SIZE(il_eeprom_band_4) +
	    ARRAY_SIZE(il_eeprom_band_5);

	D_EEPROM("Parsing data for %d channels.\n", il->channel_count);

	il->channel_info =
	    kcalloc(il->channel_count, sizeof(struct il_channel_info),
		    GFP_KERNEL);
	if (!il->channel_info) {
		IL_ERR("Could not allocate channel_info\n");
		il->channel_count = 0;
		return -ENOMEM;
	}

	ch_info = il->channel_info;

	/* Loop through the 5 EEPROM bands, adding them in order to the
	 * channel map we maintain (which contains additional information
	 * beyond what is in the EEPROM) */
	for (band = 1; band <= 5; band++) {

		il_init_band_reference(il, band, &eeprom_ch_count,
				       &eeprom_ch_info, &eeprom_ch_idx);

		/* Loop through each band adding each of the channels */
		for (ch = 0; ch < eeprom_ch_count; ch++) {
			ch_info->channel = eeprom_ch_idx[ch];
			ch_info->band =
			    (band ==
			     1) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;

			/* permanently store EEPROM's channel regulatory flags
			 *   and max power in channel info database. */
			ch_info->eeprom = eeprom_ch_info[ch];

			/* Copy the run-time flags so they are there even on
			 * invalid channels */
			ch_info->flags = eeprom_ch_info[ch].flags;
			/* First write that ht40 is not enabled, and then enable
			 * one by one */
			ch_info->ht40_extension_channel =
			    IEEE80211_CHAN_NO_HT40;

			if (!(il_is_channel_valid(ch_info))) {
				D_EEPROM("Ch. %d Flags %x [%sGHz] - "
					 "No traffic\n", ch_info->channel,
					 ch_info->flags,
					 il_is_channel_a_band(ch_info) ? "5.2" :
					 "2.4");
				ch_info++;
				continue;
			}

			/* Initialize regulatory-based run-time data */
			ch_info->max_power_avg = ch_info->curr_txpow =
			    eeprom_ch_info[ch].max_power_avg;
			ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
			ch_info->min_power = 0;

			D_EEPROM("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm):"
				 " Ad-Hoc %ssupported\n", ch_info->channel,
				 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
				 CHECK_AND_PRINT_I(VALID),
				 CHECK_AND_PRINT_I(IBSS),
				 CHECK_AND_PRINT_I(ACTIVE),
				 CHECK_AND_PRINT_I(RADAR),
				 CHECK_AND_PRINT_I(WIDE),
				 CHECK_AND_PRINT_I(DFS),
				 eeprom_ch_info[ch].flags,
				 eeprom_ch_info[ch].max_power_avg,
				 ((eeprom_ch_info[ch].
				   flags & EEPROM_CHANNEL_IBSS) &&
				  !(eeprom_ch_info[ch].
				    flags & EEPROM_CHANNEL_RADAR)) ? "" :
				 "not ");

			ch_info++;
		}
	}

	/* Check if we do have HT40 channels */
	if (il->cfg->regulatory_bands[5] == EEPROM_REGULATORY_BAND_NO_HT40 &&
	    il->cfg->regulatory_bands[6] == EEPROM_REGULATORY_BAND_NO_HT40)
		return 0;

	/* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
	for (band = 6; band <= 7; band++) {
		enum nl80211_band ieeeband;

		il_init_band_reference(il, band, &eeprom_ch_count,
				       &eeprom_ch_info, &eeprom_ch_idx);

		/* EEPROM band 6 is 2.4, band 7 is 5 GHz */
		ieeeband =
		    (band == 6) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;

		/* Loop through each band adding each of the channels */
		for (ch = 0; ch < eeprom_ch_count; ch++) {
			/* Set up driver's info for lower half */
			il_mod_ht40_chan_info(il, ieeeband, eeprom_ch_idx[ch],
					      &eeprom_ch_info[ch],
					      IEEE80211_CHAN_NO_HT40PLUS);

			/* Set up driver's info for upper half */
			il_mod_ht40_chan_info(il, ieeeband,
					      eeprom_ch_idx[ch] + 4,
					      &eeprom_ch_info[ch],
					      IEEE80211_CHAN_NO_HT40MINUS);
		}
	}

	return 0;
}
EXPORT_SYMBOL(il_init_channel_map);

/*
 * il_free_channel_map - undo allocations in il_init_channel_map
 */
void
il_free_channel_map(struct il_priv *il)
{
	kfree(il->channel_info);
	il->channel_count = 0;
}
EXPORT_SYMBOL(il_free_channel_map);

/*
 * il_get_channel_info - Find driver's private channel info
 *
 * Based on band and channel number.
 */
const struct il_channel_info *
il_get_channel_info(const struct il_priv *il, enum nl80211_band band,
		    u16 channel)
{
	int i;

	switch (band) {
	case NL80211_BAND_5GHZ:
		for (i = 14; i < il->channel_count; i++) {
			if (il->channel_info[i].channel == channel)
				return &il->channel_info[i];
		}
		break;
	case NL80211_BAND_2GHZ:
		if (channel >= 1 && channel <= 14)
			return &il->channel_info[channel - 1];
		break;
	default:
		BUG();
	}

	return NULL;
}
EXPORT_SYMBOL(il_get_channel_info);

/*
 * Setting power level allows the card to go to sleep when not busy.
 *
 * We calculate a sleep command based on the required latency, which
 * we get from mac80211.
 */

#define SLP_VEC(X0, X1, X2, X3, X4) { \
		cpu_to_le32(X0), \
		cpu_to_le32(X1), \
		cpu_to_le32(X2), \
		cpu_to_le32(X3), \
		cpu_to_le32(X4)  \
}

static void
il_build_powertable_cmd(struct il_priv *il, struct il_powertable_cmd *cmd)
{
	static const __le32 interval[3][IL_POWER_VEC_SIZE] = {
		SLP_VEC(2, 2, 4, 6, 0xFF),
		SLP_VEC(2, 4, 7, 10, 10),
		SLP_VEC(4, 7, 10, 10, 0xFF)
	};
	int i, dtim_period, no_dtim;
	u32 max_sleep;
	bool skip;

	memset(cmd, 0, sizeof(*cmd));

	if (il->power_data.pci_pm)
		cmd->flags |= IL_POWER_PCI_PM_MSK;

	/* if no Power Save, we are done */
	if (il->power_data.ps_disabled)
		return;

	cmd->flags = IL_POWER_DRIVER_ALLOW_SLEEP_MSK;
	cmd->keep_alive_seconds = 0;
	cmd->debug_flags = 0;
	cmd->rx_data_timeout = cpu_to_le32(25 * 1024);
	cmd->tx_data_timeout = cpu_to_le32(25 * 1024);
	cmd->keep_alive_beacons = 0;

	dtim_period = il->vif ? il->vif->bss_conf.dtim_period : 0;

	if (dtim_period <= 2) {
		memcpy(cmd->sleep_interval, interval[0], sizeof(interval[0]));
		no_dtim = 2;
	} else if (dtim_period <= 10) {
		memcpy(cmd->sleep_interval, interval[1], sizeof(interval[1]));
		no_dtim = 2;
	} else {
		memcpy(cmd->sleep_interval, interval[2], sizeof(interval[2]));
		no_dtim = 0;
	}

	if (dtim_period == 0) {
		dtim_period = 1;
		skip = false;
	} else {
		skip = !!no_dtim;
	}

	if (skip) {
		__le32 tmp = cmd->sleep_interval[IL_POWER_VEC_SIZE - 1];

		max_sleep = le32_to_cpu(tmp);
		if (max_sleep == 0xFF)
			max_sleep = dtim_period * (skip + 1);
		else if (max_sleep > dtim_period)
			max_sleep = (max_sleep / dtim_period) * dtim_period;
		cmd->flags |= IL_POWER_SLEEP_OVER_DTIM_MSK;
	} else {
		max_sleep = dtim_period;
		cmd->flags &= ~IL_POWER_SLEEP_OVER_DTIM_MSK;
	}

	for (i = 0; i < IL_POWER_VEC_SIZE; i++)
		if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
			cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
}
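
/*
 * Worked example of the clamping above: with dtim_period = 3, the second
 * interval table {2, 4, 7, 10, 10} is chosen and no_dtim = 2, so sleeping
 * over DTIM is enabled.  The last entry is 10, which is not 0xFF and
 * exceeds the DTIM period, so max_sleep is rounded down to a multiple of
 * it: (10 / 3) * 3 = 9, and the two trailing 10s are clamped to 9.
 */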

static int
il_set_power(struct il_priv *il, struct il_powertable_cmd *cmd)
{
	D_POWER("Sending power/sleep command\n");
	D_POWER("Flags value = 0x%08X\n", cmd->flags);
	D_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
	D_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
	D_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
		le32_to_cpu(cmd->sleep_interval[0]),
		le32_to_cpu(cmd->sleep_interval[1]),
		le32_to_cpu(cmd->sleep_interval[2]),
		le32_to_cpu(cmd->sleep_interval[3]),
		le32_to_cpu(cmd->sleep_interval[4]));

	return il_send_cmd_pdu(il, C_POWER_TBL,
			       sizeof(struct il_powertable_cmd), cmd);
}

static int
il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd, bool force)
{
	int ret;
	bool update_chains;

	lockdep_assert_held(&il->mutex);

	/* Don't update the RX chain when chain noise calibration is running */
	update_chains = il->chain_noise_data.state == IL_CHAIN_NOISE_DONE ||
	    il->chain_noise_data.state == IL_CHAIN_NOISE_ALIVE;

	if (!memcmp(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
		return 0;

	if (!il_is_ready_rf(il))
		return -EIO;

	/* scan complete uses sleep_cmd_next, which needs to be updated */
	memcpy(&il->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
	if (test_bit(S_SCANNING, &il->status) && !force) {
		D_INFO("Defer power set mode while scanning\n");
		return 0;
	}

	if (cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK)
		set_bit(S_POWER_PMI, &il->status);

	ret = il_set_power(il, cmd);
	if (!ret) {
		if (!(cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK))
			clear_bit(S_POWER_PMI, &il->status);

		if (il->ops->update_chain_flags && update_chains)
			il->ops->update_chain_flags(il);
		else if (il->ops->update_chain_flags)
			D_POWER("Cannot update the power, chain noise "
				"calibration running: %d\n",
				il->chain_noise_data.state);

		memcpy(&il->power_data.sleep_cmd, cmd, sizeof(*cmd));
	} else
		IL_ERR("set power fail, ret = %d", ret);

	return ret;
}

int
il_power_update_mode(struct il_priv *il, bool force)
{
	struct il_powertable_cmd cmd;

	il_build_powertable_cmd(il, &cmd);

	return il_power_set_mode(il, &cmd, force);
}
EXPORT_SYMBOL(il_power_update_mode);

/* initialize to default */
void
il_power_initialize(struct il_priv *il)
{
	u16 lctl;

	pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
	il->power_data.pci_pm = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

	il->power_data.debug_sleep_level_override = -1;

	memset(&il->power_data.sleep_cmd, 0, sizeof(il->power_data.sleep_cmd));
}
EXPORT_SYMBOL(il_power_initialize);

/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
 * sending probe req.  This should be set long enough to hear probe responses
 * from more than one AP.  */
#define IL_ACTIVE_DWELL_TIME_24    (30)	/* all times in msec */
#define IL_ACTIVE_DWELL_TIME_52    (20)

#define IL_ACTIVE_DWELL_FACTOR_24GHZ (3)
#define IL_ACTIVE_DWELL_FACTOR_52GHZ (2)

/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
 * Must be set longer than active dwell time.
 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
#define IL_PASSIVE_DWELL_TIME_24   (20)	/* all times in msec */
#define IL_PASSIVE_DWELL_TIME_52   (10)
#define IL_PASSIVE_DWELL_BASE      (100)
#define IL_CHANNEL_TUNE_TIME       5

static int
il_send_scan_abort(struct il_priv *il)
{
	int ret;
	struct il_rx_pkt *pkt;
	struct il_host_cmd cmd = {
		.id = C_SCAN_ABORT,
		.flags = CMD_WANT_SKB,
	};

	/* Exit instantly with error when device is not ready
	 * to receive scan abort command or it does not perform
	 * hardware scan currently */
	if (!test_bit(S_READY, &il->status) ||
	    !test_bit(S_GEO_CONFIGURED, &il->status) ||
	    !test_bit(S_SCAN_HW, &il->status) ||
	    test_bit(S_FW_ERROR, &il->status) ||
	    test_bit(S_EXIT_PENDING, &il->status))
		return -EIO;

	ret = il_send_cmd_sync(il, &cmd);
	if (ret)
		return ret;

	pkt = (struct il_rx_pkt *)cmd.reply_page;
	if (pkt->u.status != CAN_ABORT_STATUS) {
		/* The scan abort will return 1 for success or
		 * 2 for "failure".  A failure condition can be
		 * due to simply not being in an active scan, which
		 * can occur if we send the scan abort before the
		 * microcode has notified us that a scan is
		 * completed. */
		D_SCAN("SCAN_ABORT ret %d.\n", pkt->u.status);
		ret = -EIO;
	}

	il_free_pages(il, cmd.reply_page);
	return ret;
}

static void
il_complete_scan(struct il_priv *il, bool aborted)
{
	struct cfg80211_scan_info info = {
		.aborted = aborted,
	};

	/* check if scan was requested from mac80211 */
	if (il->scan_request) {
		D_SCAN("Complete scan in mac80211\n");
		ieee80211_scan_completed(il->hw, &info);
	}

	il->scan_vif = NULL;
	il->scan_request = NULL;
}

void
il_force_scan_end(struct il_priv *il)
{
	lockdep_assert_held(&il->mutex);

	if (!test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Forcing scan end while not scanning\n");
		return;
	}

	D_SCAN("Forcing scan end\n");
	clear_bit(S_SCANNING, &il->status);
	clear_bit(S_SCAN_HW, &il->status);
	clear_bit(S_SCAN_ABORTING, &il->status);
	il_complete_scan(il, true);
}

static void
il_do_scan_abort(struct il_priv *il)
{
	int ret;

	lockdep_assert_held(&il->mutex);

	if (!test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Not performing scan to abort\n");
		return;
	}

	if (test_and_set_bit(S_SCAN_ABORTING, &il->status)) {
		D_SCAN("Scan abort in progress\n");
		return;
	}

	ret = il_send_scan_abort(il);
	if (ret) {
		D_SCAN("Send scan abort failed %d\n", ret);
		il_force_scan_end(il);
	} else
		D_SCAN("Successfully sent scan abort\n");
}

/*
 * il_scan_cancel - Cancel any currently executing HW scan
 */
int
il_scan_cancel(struct il_priv *il)
{
	D_SCAN("Queuing abort scan\n");
	queue_work(il->workqueue, &il->abort_scan);
	return 0;
}
EXPORT_SYMBOL(il_scan_cancel);

/*
 * il_scan_cancel_timeout - Cancel any currently executing HW scan
 * @ms: amount of time to wait (in milliseconds) for scan to abort
 */
int
il_scan_cancel_timeout(struct il_priv *il, unsigned long ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(ms);

	lockdep_assert_held(&il->mutex);

	D_SCAN("Scan cancel timeout\n");

	il_do_scan_abort(il);

	while (time_before_eq(jiffies, timeout)) {
		if (!test_bit(S_SCAN_HW, &il->status))
			break;
		msleep(20);
	}

	return test_bit(S_SCAN_HW, &il->status);
}
EXPORT_SYMBOL(il_scan_cancel_timeout);

/* Service response to C_SCAN (0x80) */
static void
il_hdl_scan(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanreq_notification *notif =
	    (struct il_scanreq_notification *)pkt->u.raw;

	D_SCAN("Scan request status = 0x%x\n", notif->status);
#endif
}

/* Service N_SCAN_START (0x82) */
static void
il_hdl_scan_start(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanstart_notification *notif =
	    (struct il_scanstart_notification *)pkt->u.raw;

	il->scan_start_tsf = le32_to_cpu(notif->tsf_low);
	D_SCAN("Scan start: %d [802.11%s] "
	       "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", notif->channel,
	       notif->band ? "bg" : "a", le32_to_cpu(notif->tsf_high),
	       le32_to_cpu(notif->tsf_low), notif->status, notif->beacon_timer);
}

/* Service N_SCAN_RESULTS (0x83) */
static void
il_hdl_scan_results(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanresults_notification *notif =
	    (struct il_scanresults_notification *)pkt->u.raw;

	D_SCAN("Scan ch.res: %d [802.11%s] (TSF: 0x%08X:%08X) - %d "
	       "elapsed=%lu usec\n", notif->channel, notif->band ? "bg" : "a",
	       le32_to_cpu(notif->tsf_high), le32_to_cpu(notif->tsf_low),
	       le32_to_cpu(notif->stats[0]),
	       le32_to_cpu(notif->tsf_low) - il->scan_start_tsf);
#endif
}

/* Service N_SCAN_COMPLETE (0x84) */
static void
il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scancomplete_notification *scan_notif = (void *)pkt->u.raw;

	D_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
	       scan_notif->scanned_channels, scan_notif->tsf_low,
	       scan_notif->tsf_high, scan_notif->status);

	/* The HW is no longer scanning */
	clear_bit(S_SCAN_HW, &il->status);

	D_SCAN("Scan on %sGHz took %dms\n",
	       (il->scan_band == NL80211_BAND_2GHZ) ? "2.4" : "5.2",
	       jiffies_to_msecs(jiffies - il->scan_start));

	queue_work(il->workqueue, &il->scan_completed);
}

void
il_setup_rx_scan_handlers(struct il_priv *il)
{
	/* scan handlers */
	il->handlers[C_SCAN] = il_hdl_scan;
	il->handlers[N_SCAN_START] = il_hdl_scan_start;
	il->handlers[N_SCAN_RESULTS] = il_hdl_scan_results;
	il->handlers[N_SCAN_COMPLETE] = il_hdl_scan_complete;
}
EXPORT_SYMBOL(il_setup_rx_scan_handlers);

u16
il_get_active_dwell_time(struct il_priv *il, enum nl80211_band band,
			 u8 n_probes)
{
	if (band == NL80211_BAND_5GHZ)
		return IL_ACTIVE_DWELL_TIME_52 +
		    IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
	else
		return IL_ACTIVE_DWELL_TIME_24 +
		    IL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
}
EXPORT_SYMBOL(il_get_active_dwell_time);

u16
il_get_passive_dwell_time(struct il_priv *il, enum nl80211_band band,
			  struct ieee80211_vif *vif)
{
	u16 value;

	u16 passive =
	    (band ==
	     NL80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE +
	    IL_PASSIVE_DWELL_TIME_24 : IL_PASSIVE_DWELL_BASE +
	    IL_PASSIVE_DWELL_TIME_52;

	if (il_is_any_associated(il)) {
		/*
		 * If we're associated, we clamp the maximum passive
		 * dwell time to be 98% of the smallest beacon interval
		 * (minus 2 * channel tune time)
		 */
		value = il->vif ? il->vif->bss_conf.beacon_int : 0;
		if (value > IL_PASSIVE_DWELL_BASE || !value)
			value = IL_PASSIVE_DWELL_BASE;
		value = (value * 98) / 100 - IL_CHANNEL_TUNE_TIME * 2;
		passive = min(value, passive);
	}

	return passive;
}
EXPORT_SYMBOL(il_get_passive_dwell_time);
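
/*
 * Worked example: on 2.4 GHz the default passive dwell is
 * 100 + 20 = 120 ms.  When associated with a typical 100 ms beacon
 * interval, value is capped at IL_PASSIVE_DWELL_BASE (100), then reduced
 * to 100 * 98 / 100 - 2 * 5 = 88 ms, which wins the min() and becomes
 * the dwell time.
 */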

void
il_init_scan_params(struct il_priv *il)
{
	u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1;

	if (!il->scan_tx_ant[NL80211_BAND_5GHZ])
		il->scan_tx_ant[NL80211_BAND_5GHZ] = ant_idx;
	if (!il->scan_tx_ant[NL80211_BAND_2GHZ])
		il->scan_tx_ant[NL80211_BAND_2GHZ] = ant_idx;
}
EXPORT_SYMBOL(il_init_scan_params);

static int
il_scan_initiate(struct il_priv *il, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&il->mutex);

	cancel_delayed_work(&il->scan_check);

	if (!il_is_ready_rf(il)) {
		IL_WARN("Request scan called when driver not ready.\n");
		return -EIO;
	}

	if (test_bit(S_SCAN_HW, &il->status)) {
		D_SCAN("Multiple concurrent scan requests in parallel.\n");
		return -EBUSY;
	}

	if (test_bit(S_SCAN_ABORTING, &il->status)) {
		D_SCAN("Scan request while abort pending.\n");
		return -EBUSY;
	}

	D_SCAN("Starting scan...\n");

	set_bit(S_SCANNING, &il->status);
	il->scan_start = jiffies;

	ret = il->ops->request_scan(il, vif);
	if (ret) {
		clear_bit(S_SCANNING, &il->status);
		return ret;
	}

	queue_delayed_work(il->workqueue, &il->scan_check,
			   IL_SCAN_CHECK_WATCHDOG);

	return 0;
}

int
il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
	       struct ieee80211_scan_request *hw_req)
{
	struct cfg80211_scan_request *req = &hw_req->req;
	struct il_priv *il = hw->priv;
	int ret;

	if (req->n_channels == 0) {
		IL_ERR("Cannot scan: no channels given.\n");
		return -EINVAL;
	}

	mutex_lock(&il->mutex);
	D_MAC80211("enter\n");

	if (test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Scan already in progress.\n");
		ret = -EAGAIN;
		goto out_unlock;
	}

	/* mac80211 will only ask for one band at a time */
	il->scan_request = req;
	il->scan_vif = vif;
	il->scan_band = req->channels[0]->band;

	ret = il_scan_initiate(il, vif);

out_unlock:
	D_MAC80211("leave ret %d\n", ret);
	mutex_unlock(&il->mutex);

	return ret;
}
EXPORT_SYMBOL(il_mac_hw_scan);

static void
il_bg_scan_check(struct work_struct *data)
{
	struct il_priv *il =
	    container_of(data, struct il_priv, scan_check.work);

	D_SCAN("Scan check work\n");

	/* Since we got here, the firmware has not finished the scan and
	 * is most likely in bad shape, so we don't bother to send an
	 * abort command; just force scan complete to mac80211 */
	mutex_lock(&il->mutex);
	il_force_scan_end(il);
	mutex_unlock(&il->mutex);
}

/*
 * il_fill_probe_req - fill in all required fields and IE for probe request
 */
u16
il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
		  const u8 *ta, const u8 *ies, int ie_len, int left)
{
	int len = 0;
	u8 *pos = NULL;

	/* Make sure there is enough space for the probe request,
	 * two mandatory IEs and the data */
	left -= 24;
	if (left < 0)
		return 0;

	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
	eth_broadcast_addr(frame->da);
	memcpy(frame->sa, ta, ETH_ALEN);
	eth_broadcast_addr(frame->bssid);
	frame->seq_ctrl = 0;

	len += 24;

	/* ...next IE... */
	pos = &frame->u.probe_req.variable[0];

	/* fill in our indirect SSID IE */
	left -= 2;
	if (left < 0)
		return 0;
	*pos++ = WLAN_EID_SSID;
	*pos++ = 0;

	len += 2;

	if (WARN_ON(left < ie_len))
		return len;

	if (ies && ie_len) {
		memcpy(pos, ies, ie_len);
		len += ie_len;
	}

	return (u16) len;
}
EXPORT_SYMBOL(il_fill_probe_req);
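
/*
 * Resulting frame layout: a 24-byte management header, a 2-byte wildcard
 * (zero-length) SSID IE, then the caller-supplied IEs.  A caller sketch,
 * where buf_size (hypothetical) is the space available for the frame:
 *
 *	len = il_fill_probe_req(il, frame, vif->addr, ies, ie_len,
 *				buf_size);
 *	if (!len)
 *		... buffer too small for header plus SSID IE ...
 */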
1658  
1659  static void
il_bg_abort_scan(struct work_struct * work)1660  il_bg_abort_scan(struct work_struct *work)
1661  {
1662  	struct il_priv *il = container_of(work, struct il_priv, abort_scan);
1663  
1664  	D_SCAN("Abort scan work\n");
1665  
1666  	/* We keep scan_check work queued in case when firmware will not
1667  	 * report back scan completed notification */
1668  	mutex_lock(&il->mutex);
1669  	il_scan_cancel_timeout(il, 200);
1670  	mutex_unlock(&il->mutex);
1671  }
1672  
1673  static void
il_bg_scan_completed(struct work_struct * work)1674  il_bg_scan_completed(struct work_struct *work)
1675  {
1676  	struct il_priv *il = container_of(work, struct il_priv, scan_completed);
1677  	bool aborted;
1678  
1679  	D_SCAN("Completed scan.\n");
1680  
1681  	cancel_delayed_work(&il->scan_check);
1682  
1683  	mutex_lock(&il->mutex);
1684  
1685  	aborted = test_and_clear_bit(S_SCAN_ABORTING, &il->status);
1686  	if (aborted)
1687  		D_SCAN("Aborted scan completed.\n");
1688  
1689  	if (!test_and_clear_bit(S_SCANNING, &il->status)) {
1690  		D_SCAN("Scan already completed.\n");
1691  		goto out_settings;
1692  	}
1693  
1694  	il_complete_scan(il, aborted);
1695  
1696  out_settings:
1697  	/* Can we still talk to firmware ? */
1698  	if (!il_is_ready_rf(il))
1699  		goto out;
1700  
1701  	/*
1702  	 * We do not commit power settings while scan is pending,
1703  	 * do it now if the settings changed.
1704  	 */
1705  	il_power_set_mode(il, &il->power_data.sleep_cmd_next, false);
1706  	il_set_tx_power(il, il->tx_power_next, false);
1707  
1708  	il->ops->post_scan(il);
1709  
1710  out:
1711  	mutex_unlock(&il->mutex);
1712  }
1713  
1714  void
il_setup_scan_deferred_work(struct il_priv * il)1715  il_setup_scan_deferred_work(struct il_priv *il)
1716  {
1717  	INIT_WORK(&il->scan_completed, il_bg_scan_completed);
1718  	INIT_WORK(&il->abort_scan, il_bg_abort_scan);
1719  	INIT_DELAYED_WORK(&il->scan_check, il_bg_scan_check);
1720  }
1721  EXPORT_SYMBOL(il_setup_scan_deferred_work);
1722  
1723  void
il_cancel_scan_deferred_work(struct il_priv * il)1724  il_cancel_scan_deferred_work(struct il_priv *il)
1725  {
1726  	cancel_work_sync(&il->abort_scan);
1727  	cancel_work_sync(&il->scan_completed);
1728  
1729  	if (cancel_delayed_work_sync(&il->scan_check)) {
1730  		mutex_lock(&il->mutex);
1731  		il_force_scan_end(il);
1732  		mutex_unlock(&il->mutex);
1733  	}
1734  }
1735  EXPORT_SYMBOL(il_cancel_scan_deferred_work);
1736  
1737  /* il->sta_lock must be held */
1738  static void
1739  il_sta_ucode_activate(struct il_priv *il, u8 sta_id)
1740  {
1741  
1742  	if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE))
1743  		IL_ERR("ACTIVATE a non DRIVER active station id %u addr %pM\n",
1744  		       sta_id, il->stations[sta_id].sta.sta.addr);
1745  
1746  	if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) {
1747  		D_ASSOC("STA id %u addr %pM already present"
1748  			" in uCode (according to driver)\n", sta_id,
1749  			il->stations[sta_id].sta.sta.addr);
1750  	} else {
1751  		il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE;
1752  		D_ASSOC("Added STA id %u addr %pM to uCode\n", sta_id,
1753  			il->stations[sta_id].sta.sta.addr);
1754  	}
1755  }
1756  
1757  static int
1758  il_process_add_sta_resp(struct il_priv *il, struct il_addsta_cmd *addsta,
1759  			struct il_rx_pkt *pkt, bool sync)
1760  {
1761  	u8 sta_id = addsta->sta.sta_id;
1762  	unsigned long flags;
1763  	int ret = -EIO;
1764  
1765  	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
1766  		IL_ERR("Bad return from C_ADD_STA (0x%08X)\n", pkt->hdr.flags);
1767  		return ret;
1768  	}
1769  
1770  	D_INFO("Processing response for adding station %u\n", sta_id);
1771  
1772  	spin_lock_irqsave(&il->sta_lock, flags);
1773  
1774  	switch (pkt->u.add_sta.status) {
1775  	case ADD_STA_SUCCESS_MSK:
1776  		D_INFO("C_ADD_STA PASSED\n");
1777  		il_sta_ucode_activate(il, sta_id);
1778  		ret = 0;
1779  		break;
1780  	case ADD_STA_NO_ROOM_IN_TBL:
1781  		IL_ERR("Adding station %d failed, no room in table.\n", sta_id);
1782  		break;
1783  	case ADD_STA_NO_BLOCK_ACK_RESOURCE:
1784  		IL_ERR("Adding station %d failed, no block ack resource.\n",
1785  		       sta_id);
1786  		break;
1787  	case ADD_STA_MODIFY_NON_EXIST_STA:
1788  		IL_ERR("Attempting to modify non-existing station %d\n",
1789  		       sta_id);
1790  		break;
1791  	default:
1792  		D_ASSOC("Received C_ADD_STA:(0x%08X)\n", pkt->u.add_sta.status);
1793  		break;
1794  	}
1795  
1796  	D_INFO("%s station id %u addr %pM\n",
1797  	       il->stations[sta_id].sta.mode ==
1798  	       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", sta_id,
1799  	       il->stations[sta_id].sta.sta.addr);
1800  
1801  	/*
1802  	 * XXX: The MAC address in the command buffer is often changed from
1803  	 * the original sent to the device. That is, the MAC address
1804  	 * written to the command buffer often is not the same MAC address
1805  	 * read from the command buffer when the command returns. This
1806  	 * issue has not yet been resolved and this debugging is left to
1807  	 * observe the problem.
1808  	 */
1809  	D_INFO("%s station according to cmd buffer %pM\n",
1810  	       il->stations[sta_id].sta.mode ==
1811  	       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", addsta->sta.addr);
1812  	spin_unlock_irqrestore(&il->sta_lock, flags);
1813  
1814  	return ret;
1815  }
1816  
1817  static void
1818  il_add_sta_callback(struct il_priv *il, struct il_device_cmd *cmd,
1819  		    struct il_rx_pkt *pkt)
1820  {
1821  	struct il_addsta_cmd *addsta = (struct il_addsta_cmd *)cmd->cmd.payload;
1822  
1823  	il_process_add_sta_resp(il, addsta, pkt, false);
1824  
1825  }
1826  
1827  int
1828  il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags)
1829  {
1830  	struct il_rx_pkt *pkt = NULL;
1831  	int ret = 0;
1832  	u8 data[sizeof(*sta)];
1833  	struct il_host_cmd cmd = {
1834  		.id = C_ADD_STA,
1835  		.flags = flags,
1836  		.data = data,
1837  	};
1838  	u8 sta_id __maybe_unused = sta->sta.sta_id;
1839  
1840  	D_INFO("Adding sta %u (%pM) %ssynchronously\n", sta_id, sta->sta.addr,
1841  	       flags & CMD_ASYNC ? "a" : "");
1842  
1843  	if (flags & CMD_ASYNC)
1844  		cmd.callback = il_add_sta_callback;
1845  	else {
1846  		cmd.flags |= CMD_WANT_SKB;
1847  		might_sleep();
1848  	}
1849  
1850  	cmd.len = il->ops->build_addsta_hcmd(sta, data);
1851  	ret = il_send_cmd(il, &cmd);
1852  	if (ret)
1853  		return ret;
1854  	if (flags & CMD_ASYNC)
1855  		return 0;
1856  
1857  	pkt = (struct il_rx_pkt *)cmd.reply_page;
1858  	ret = il_process_add_sta_resp(il, sta, pkt, true);
1859  
1860  	il_free_pages(il, cmd.reply_page);
1861  
1862  	return ret;
1863  }
1864  EXPORT_SYMBOL(il_send_add_sta);
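
/*
 * Usage sketch (illustrative, assuming a populated struct il_addsta_cmd
 * named sta_cmd): a synchronous caller may sleep and gets the parsed
 * uCode status back, while an asynchronous caller returns as soon as the
 * command is queued and the response is consumed by il_add_sta_callback()
 * instead.
 *
 *	ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
 *	ret = il_send_add_sta(il, &sta_cmd, CMD_ASYNC);
 */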
1865  
1866  static void
1867  il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta)
1868  {
1869  	struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->deflink.ht_cap;
1870  	__le32 sta_flags;
1871  
1872  	if (!sta || !sta_ht_inf->ht_supported)
1873  		goto done;
1874  
1875  	D_ASSOC("spatial multiplexing power save mode: %s\n",
1876  		(sta->deflink.smps_mode == IEEE80211_SMPS_STATIC) ? "static" :
1877  		(sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC) ? "dynamic" :
1878  		"disabled");
1879  
1880  	sta_flags = il->stations[idx].sta.station_flags;
1881  
1882  	sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
1883  
1884  	switch (sta->deflink.smps_mode) {
1885  	case IEEE80211_SMPS_STATIC:
1886  		sta_flags |= STA_FLG_MIMO_DIS_MSK;
1887  		break;
1888  	case IEEE80211_SMPS_DYNAMIC:
1889  		sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
1890  		break;
1891  	case IEEE80211_SMPS_OFF:
1892  		break;
1893  	default:
1894  		IL_WARN("Invalid MIMO PS mode %d\n", sta->deflink.smps_mode);
1895  		break;
1896  	}
1897  
1898  	sta_flags |=
1899  	    cpu_to_le32((u32) sta_ht_inf->
1900  			ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
1901  
1902  	sta_flags |=
1903  	    cpu_to_le32((u32) sta_ht_inf->
1904  			ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
1905  
1906  	if (il_is_ht40_tx_allowed(il, &sta->deflink.ht_cap))
1907  		sta_flags |= STA_FLG_HT40_EN_MSK;
1908  	else
1909  		sta_flags &= ~STA_FLG_HT40_EN_MSK;
1910  
1911  	il->stations[idx].sta.station_flags = sta_flags;
1912  done:
1913  	return;
1914  }
1915  
1916  /*
1917   * il_prep_station - Prepare station information for addition
1918   *
1919   * should be called with sta_lock held
1920   */
1921  u8
1922  il_prep_station(struct il_priv *il, const u8 *addr, bool is_ap,
1923  		struct ieee80211_sta *sta)
1924  {
1925  	struct il_station_entry *station;
1926  	int i;
1927  	u8 sta_id = IL_INVALID_STATION;
1928  	u16 rate;
1929  
1930  	if (is_ap)
1931  		sta_id = IL_AP_ID;
1932  	else if (is_broadcast_ether_addr(addr))
1933  		sta_id = il->hw_params.bcast_id;
1934  	else
1935  		for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) {
1936  			if (ether_addr_equal(il->stations[i].sta.sta.addr,
1937  					     addr)) {
1938  				sta_id = i;
1939  				break;
1940  			}
1941  
1942  			if (!il->stations[i].used &&
1943  			    sta_id == IL_INVALID_STATION)
1944  				sta_id = i;
1945  		}
1946  
1947  	/*
1948  	 * These two conditions have the same outcome, but keep them
1949  	 * separate
1950  	 */
1951  	if (unlikely(sta_id == IL_INVALID_STATION))
1952  		return sta_id;
1953  
1954  	/*
1955  	 * uCode is not able to deal with multiple requests to add a
1956  	 * station. Keep track if one is in progress so that we do not send
1957  	 * another.
1958  	 */
1959  	if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
1960  		D_INFO("STA %d already in process of being added.\n", sta_id);
1961  		return sta_id;
1962  	}
1963  
1964  	if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
1965  	    (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) &&
1966  	    ether_addr_equal(il->stations[sta_id].sta.sta.addr, addr)) {
1967  		D_ASSOC("STA %d (%pM) already added, not adding again.\n",
1968  			sta_id, addr);
1969  		return sta_id;
1970  	}
1971  
1972  	station = &il->stations[sta_id];
1973  	station->used = IL_STA_DRIVER_ACTIVE;
1974  	D_ASSOC("Add STA to driver ID %d: %pM\n", sta_id, addr);
1975  	il->num_stations++;
1976  
1977  	/* Set up the C_ADD_STA command to send to device */
1978  	memset(&station->sta, 0, sizeof(struct il_addsta_cmd));
1979  	memcpy(station->sta.sta.addr, addr, ETH_ALEN);
1980  	station->sta.mode = 0;
1981  	station->sta.sta.sta_id = sta_id;
1982  	station->sta.station_flags = 0;
1983  
1984  	/*
1985  	 * OK to call unconditionally, since local stations (IBSS BSSID
1986  	 * STA and broadcast STA) pass in a NULL sta, and mac80211
1987  	 * doesn't allow HT IBSS.
1988  	 */
1989  	il_set_ht_add_station(il, sta_id, sta);
1990  
1991  	/* 3945 only */
1992  	rate = (il->band == NL80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP;
1993  	/* Turn on both antennas for the station... */
1994  	station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
1995  
1996  	return sta_id;
1997  
1998  }
1999  EXPORT_SYMBOL_GPL(il_prep_station);
2000  
2001  #define STA_WAIT_TIMEOUT (HZ/2)
2002  
2003  /*
2004   * il_add_station_common - add a station to the driver and uCode tables
2005   */
2006  int
2007  il_add_station_common(struct il_priv *il, const u8 *addr, bool is_ap,
2008  		      struct ieee80211_sta *sta, u8 *sta_id_r)
2009  {
2010  	unsigned long flags_spin;
2011  	int ret = 0;
2012  	u8 sta_id;
2013  	struct il_addsta_cmd sta_cmd;
2014  
2015  	*sta_id_r = 0;
2016  	spin_lock_irqsave(&il->sta_lock, flags_spin);
2017  	sta_id = il_prep_station(il, addr, is_ap, sta);
2018  	if (sta_id == IL_INVALID_STATION) {
2019  		IL_ERR("Unable to prepare station %pM for addition\n", addr);
2020  		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2021  		return -EINVAL;
2022  	}
2023  
2024  	/*
2025  	 * uCode is not able to deal with multiple requests to add a
2026  	 * station. Keep track if one is in progress so that we do not send
2027  	 * another.
2028  	 */
2029  	if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
2030  		D_INFO("STA %d already in process of being added.\n", sta_id);
2031  		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2032  		return -EEXIST;
2033  	}
2034  
2035  	if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
2036  	    (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
2037  		D_ASSOC("STA %d (%pM) already added, not adding again.\n",
2038  			sta_id, addr);
2039  		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2040  		return -EEXIST;
2041  	}
2042  
2043  	il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS;
2044  	memcpy(&sta_cmd, &il->stations[sta_id].sta,
2045  	       sizeof(struct il_addsta_cmd));
2046  	spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2047  
2048  	/* Add station to device's station table */
2049  	ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
2050  	if (ret) {
2051  		spin_lock_irqsave(&il->sta_lock, flags_spin);
2052  		IL_ERR("Adding station %pM failed.\n",
2053  		       il->stations[sta_id].sta.sta.addr);
2054  		il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
2055  		il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
2056  		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2057  	}
2058  	*sta_id_r = sta_id;
2059  	return ret;
2060  }
2061  EXPORT_SYMBOL(il_add_station_common);
2062  
2063  /*
2064   * il_sta_ucode_deactivate - deactivate ucode status for a station
2065   *
2066   * il->sta_lock must be held
2067   */
2068  static void
2069  il_sta_ucode_deactivate(struct il_priv *il, u8 sta_id)
2070  {
2071  	/* uCode must be active and the driver must not be */
2072  	if ((il->stations[sta_id].
2073  	     used & (IL_STA_UCODE_ACTIVE | IL_STA_DRIVER_ACTIVE)) !=
2074  	    IL_STA_UCODE_ACTIVE)
2075  		IL_ERR("removed non active STA %u\n", sta_id);
2076  
2077  	il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE;
2078  
2079  	memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry));
2080  	D_ASSOC("Removed STA %u\n", sta_id);
2081  }
2082  
2083  static int
2084  il_send_remove_station(struct il_priv *il, const u8 * addr, int sta_id,
2085  		       bool temporary)
2086  {
2087  	struct il_rx_pkt *pkt;
2088  	int ret;
2089  
2090  	unsigned long flags_spin;
2091  	struct il_rem_sta_cmd rm_sta_cmd;
2092  
2093  	struct il_host_cmd cmd = {
2094  		.id = C_REM_STA,
2095  		.len = sizeof(struct il_rem_sta_cmd),
2096  		.flags = CMD_SYNC,
2097  		.data = &rm_sta_cmd,
2098  	};
2099  
2100  	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
2101  	rm_sta_cmd.num_sta = 1;
2102  	memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);
2103  
2104  	cmd.flags |= CMD_WANT_SKB;
2105  
2106  	ret = il_send_cmd(il, &cmd);
2107  
2108  	if (ret)
2109  		return ret;
2110  
2111  	pkt = (struct il_rx_pkt *)cmd.reply_page;
2112  	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
2113  		IL_ERR("Bad return from C_REM_STA (0x%08X)\n", pkt->hdr.flags);
2114  		ret = -EIO;
2115  	}
2116  
2117  	if (!ret) {
2118  		switch (pkt->u.rem_sta.status) {
2119  		case REM_STA_SUCCESS_MSK:
2120  			if (!temporary) {
2121  				spin_lock_irqsave(&il->sta_lock, flags_spin);
2122  				il_sta_ucode_deactivate(il, sta_id);
2123  				spin_unlock_irqrestore(&il->sta_lock,
2124  						       flags_spin);
2125  			}
2126  			D_ASSOC("C_REM_STA PASSED\n");
2127  			break;
2128  		default:
2129  			ret = -EIO;
2130  			IL_ERR("C_REM_STA failed\n");
2131  			break;
2132  		}
2133  	}
2134  	il_free_pages(il, cmd.reply_page);
2135  
2136  	return ret;
2137  }
2138  
2139  /*
2140   * il_remove_station - Remove driver's knowledge of station.
2141   */
2142  int
2143  il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr)
2144  {
2145  	unsigned long flags;
2146  
2147  	if (!il_is_ready(il)) {
2148  		D_INFO("Unable to remove station %pM, device not ready.\n",
2149  		       addr);
2150  		/*
2151  		 * It is typical for stations to be removed when we are
2152  		 * going down. Return success since device will be down
2153  		 * soon anyway
2154  		 */
2155  		return 0;
2156  	}
2157  
2158  	D_ASSOC("Removing STA from driver:%d  %pM\n", sta_id, addr);
2159  
2160  	if (WARN_ON(sta_id == IL_INVALID_STATION))
2161  		return -EINVAL;
2162  
2163  	spin_lock_irqsave(&il->sta_lock, flags);
2164  
2165  	if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) {
2166  		D_INFO("Removing %pM but non DRIVER active\n", addr);
2167  		goto out_err;
2168  	}
2169  
2170  	if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
2171  		D_INFO("Removing %pM but non UCODE active\n", addr);
2172  		goto out_err;
2173  	}
2174  
2175  	if (il->stations[sta_id].used & IL_STA_LOCAL) {
2176  		kfree(il->stations[sta_id].lq);
2177  		il->stations[sta_id].lq = NULL;
2178  	}
2179  
2180  	il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
2181  
2182  	il->num_stations--;
2183  
2184  	BUG_ON(il->num_stations < 0);
2185  
2186  	spin_unlock_irqrestore(&il->sta_lock, flags);
2187  
2188  	return il_send_remove_station(il, addr, sta_id, false);
2189  out_err:
2190  	spin_unlock_irqrestore(&il->sta_lock, flags);
2191  	return -EINVAL;
2192  }
2193  EXPORT_SYMBOL_GPL(il_remove_station);
2194  
2195  /*
2196   * il_clear_ucode_stations - clear ucode station table bits
2197   *
2198   * This function clears all the bits in the driver indicating
2199   * which stations are active in the ucode. Call when something
2200   * other than explicit station management would cause this in
2201   * the ucode, e.g. unassociated RXON.
2202   */
2203  void
2204  il_clear_ucode_stations(struct il_priv *il)
2205  {
2206  	int i;
2207  	unsigned long flags_spin;
2208  	bool cleared = false;
2209  
2210  	D_INFO("Clearing ucode stations in driver\n");
2211  
2212  	spin_lock_irqsave(&il->sta_lock, flags_spin);
2213  	for (i = 0; i < il->hw_params.max_stations; i++) {
2214  		if (il->stations[i].used & IL_STA_UCODE_ACTIVE) {
2215  			D_INFO("Clearing ucode active for station %d\n", i);
2216  			il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
2217  			cleared = true;
2218  		}
2219  	}
2220  	spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2221  
2222  	if (!cleared)
2223  		D_INFO("No active stations found to be cleared\n");
2224  }
2225  EXPORT_SYMBOL(il_clear_ucode_stations);
2226  
2227  /*
2228   * il_restore_stations() - Restore driver known stations to device
2229   *
2230   * All stations considered active by the driver, but not present in
2231   * uCode, are restored.
2232   *
2233   * The function sleeps.
2234   */
2235  void
2236  il_restore_stations(struct il_priv *il)
2237  {
2238  	struct il_addsta_cmd sta_cmd;
2239  	struct il_link_quality_cmd lq;
2240  	unsigned long flags_spin;
2241  	int i;
2242  	bool found = false;
2243  	int ret;
2244  	bool send_lq;
2245  
2246  	if (!il_is_ready(il)) {
2247  		D_INFO("Not ready yet, not restoring any stations.\n");
2248  		return;
2249  	}
2250  
2251  	D_ASSOC("Restoring all known stations ... start.\n");
2252  	spin_lock_irqsave(&il->sta_lock, flags_spin);
2253  	for (i = 0; i < il->hw_params.max_stations; i++) {
2254  		if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) &&
2255  		    !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) {
2256  			D_ASSOC("Restoring sta %pM\n",
2257  				il->stations[i].sta.sta.addr);
2258  			il->stations[i].sta.mode = 0;
2259  			il->stations[i].used |= IL_STA_UCODE_INPROGRESS;
2260  			found = true;
2261  		}
2262  	}
2263  
2264  	for (i = 0; i < il->hw_params.max_stations; i++) {
2265  		if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) {
2266  			memcpy(&sta_cmd, &il->stations[i].sta,
2267  			       sizeof(struct il_addsta_cmd));
2268  			send_lq = false;
2269  			if (il->stations[i].lq) {
2270  				memcpy(&lq, il->stations[i].lq,
2271  				       sizeof(struct il_link_quality_cmd));
2272  				send_lq = true;
2273  			}
2274  			spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2275  			ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
2276  			if (ret) {
2277  				spin_lock_irqsave(&il->sta_lock, flags_spin);
2278  				IL_ERR("Adding station %pM failed.\n",
2279  				       il->stations[i].sta.sta.addr);
2280  				il->stations[i].used &= ~IL_STA_DRIVER_ACTIVE;
2281  				il->stations[i].used &=
2282  				    ~IL_STA_UCODE_INPROGRESS;
2283  				spin_unlock_irqrestore(&il->sta_lock,
2284  						       flags_spin);
2285  			}
2286  			/*
2287  			 * Rate scaling has already been initialized, send
2288  			 * current LQ command
2289  			 */
2290  			if (send_lq)
2291  				il_send_lq_cmd(il, &lq, CMD_SYNC, true);
2292  			spin_lock_irqsave(&il->sta_lock, flags_spin);
2293  			il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS;
2294  		}
2295  	}
2296  
2297  	spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2298  	if (!found)
2299  		D_INFO("Restoring all known stations"
2300  		       " .... no stations to be restored.\n");
2301  	else
2302  		D_INFO("Restoring all known stations" " .... complete.\n");
2303  }
2304  EXPORT_SYMBOL(il_restore_stations);
2305  
2306  int
2307  il_get_free_ucode_key_idx(struct il_priv *il)
2308  {
2309  	int i;
2310  
2311  	for (i = 0; i < il->sta_key_max_num; i++)
2312  		if (!test_and_set_bit(i, &il->ucode_key_table))
2313  			return i;
2314  
2315  	return WEP_INVALID_OFFSET;
2316  }
2317  EXPORT_SYMBOL(il_get_free_ucode_key_idx);
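
/*
 * il->ucode_key_table is a bitmap, so the test_and_set_bit() above makes
 * the allocation atomic.  A minimal sketch of the matching release (the
 * real one lives in the key-removal paths of the chip-specific code)
 * would be:
 *
 *	if (idx != WEP_INVALID_OFFSET)
 *		clear_bit(idx, &il->ucode_key_table);
 */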
2318  
2319  void
2320  il_dealloc_bcast_stations(struct il_priv *il)
2321  {
2322  	unsigned long flags;
2323  	int i;
2324  
2325  	spin_lock_irqsave(&il->sta_lock, flags);
2326  	for (i = 0; i < il->hw_params.max_stations; i++) {
2327  		if (!(il->stations[i].used & IL_STA_BCAST))
2328  			continue;
2329  
2330  		il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
2331  		il->num_stations--;
2332  		BUG_ON(il->num_stations < 0);
2333  		kfree(il->stations[i].lq);
2334  		il->stations[i].lq = NULL;
2335  	}
2336  	spin_unlock_irqrestore(&il->sta_lock, flags);
2337  }
2338  EXPORT_SYMBOL_GPL(il_dealloc_bcast_stations);
2339  
2340  #ifdef CONFIG_IWLEGACY_DEBUG
2341  static void
2342  il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
2343  {
2344  	int i;
2345  	D_RATE("lq station id 0x%x\n", lq->sta_id);
2346  	D_RATE("lq ant 0x%X 0x%X\n", lq->general_params.single_stream_ant_msk,
2347  	       lq->general_params.dual_stream_ant_msk);
2348  
2349  	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
2350  		D_RATE("lq idx %d 0x%X\n", i, lq->rs_table[i].rate_n_flags);
2351  }
2352  #else
2353  static inline void
2354  il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
2355  {
2356  }
2357  #endif
2358  
2359  /*
2360   * il_is_lq_table_valid() - Test one aspect of LQ cmd for validity
2361   *
2362   * It sometimes happens that, when an HT rate has been in use and we
2363   * lose connectivity with the AP, mac80211 first tells us that the
2364   * current channel is not HT anymore before removing the station. In such a
2365   * scenario the RXON flags will be updated to indicate we are not
2366   * communicating HT anymore, but the LQ command may still contain HT rates.
2367   * Test for this to prevent the driver from sending the LQ command between
2368   * the time RXON flags are updated and the time the LQ command is updated.
2369   */
2370  static bool
2371  il_is_lq_table_valid(struct il_priv *il, struct il_link_quality_cmd *lq)
2372  {
2373  	int i;
2374  
2375  	if (il->ht.enabled)
2376  		return true;
2377  
2378  	D_INFO("Channel %u is not an HT channel\n", il->active.channel);
2379  	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
2380  		if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) {
2381  			D_INFO("idx %d of LQ expects HT channel\n", i);
2382  			return false;
2383  		}
2384  	}
2385  	return true;
2386  }
2387  
2388  /*
2389   * il_send_lq_cmd() - Send link quality command
2390   * @init: This command is sent as part of station initialization right
2391   *        after station has been added.
2392   *
2393   * The link quality command is sent as the last step of station creation.
2394   * This is the special case in which init is set and we call a callback in
2395   * this case to clear the state indicating that station creation is in
2396   * progress.
2397   */
2398  int
2399  il_send_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq,
2400  	       u8 flags, bool init)
2401  {
2402  	int ret = 0;
2403  	unsigned long flags_spin;
2404  
2405  	struct il_host_cmd cmd = {
2406  		.id = C_TX_LINK_QUALITY_CMD,
2407  		.len = sizeof(struct il_link_quality_cmd),
2408  		.flags = flags,
2409  		.data = lq,
2410  	};
2411  
2412  	if (WARN_ON(lq->sta_id == IL_INVALID_STATION))
2413  		return -EINVAL;
2414  
2415  	spin_lock_irqsave(&il->sta_lock, flags_spin);
2416  	if (!(il->stations[lq->sta_id].used & IL_STA_DRIVER_ACTIVE)) {
2417  		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2418  		return -EINVAL;
2419  	}
2420  	spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2421  
2422  	il_dump_lq_cmd(il, lq);
2423  	BUG_ON(init && (cmd.flags & CMD_ASYNC));
2424  
2425  	if (il_is_lq_table_valid(il, lq))
2426  		ret = il_send_cmd(il, &cmd);
2427  	else
2428  		ret = -EINVAL;
2429  
2430  	if (cmd.flags & CMD_ASYNC)
2431  		return ret;
2432  
2433  	if (init) {
2434  		D_INFO("init LQ command complete,"
2435  		       " clearing sta addition status for sta %d\n",
2436  		       lq->sta_id);
2437  		spin_lock_irqsave(&il->sta_lock, flags_spin);
2438  		il->stations[lq->sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
2439  		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2440  	}
2441  	return ret;
2442  }
2443  EXPORT_SYMBOL(il_send_lq_cmd);
2444  
2445  int
2446  il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2447  		  struct ieee80211_sta *sta)
2448  {
2449  	struct il_priv *il = hw->priv;
2450  	struct il_station_priv_common *sta_common = (void *)sta->drv_priv;
2451  	int ret;
2452  
2453  	mutex_lock(&il->mutex);
2454  	D_MAC80211("enter station %pM\n", sta->addr);
2455  
2456  	ret = il_remove_station(il, sta_common->sta_id, sta->addr);
2457  	if (ret)
2458  		IL_ERR("Error removing station %pM\n", sta->addr);
2459  
2460  	D_MAC80211("leave ret %d\n", ret);
2461  	mutex_unlock(&il->mutex);
2462  
2463  	return ret;
2464  }
2465  EXPORT_SYMBOL(il_mac_sta_remove);
2466  
2467  /************************** RX-FUNCTIONS ****************************/
2468  /*
2469   * Rx theory of operation
2470   *
2471   * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
2472   * each of which point to Receive Buffers to be filled by the NIC.  These get
2473   * used not only for Rx frames, but for any command response or notification
2474   * from the NIC.  The driver and NIC manage the Rx buffers by means
2475   * of idxes into the circular buffer.
2476   *
2477   * Rx Queue Indexes
2478   * The host/firmware share two idx registers for managing the Rx buffers.
2479   *
2480   * The READ idx maps to the first position that the firmware may be writing
2481   * to -- the driver can read up to (but not including) this position and get
2482   * good data.
2483   * The READ idx is managed by the firmware once the card is enabled.
2484   *
2485   * The WRITE idx maps to the last position the driver has read from -- the
2486   * position preceding WRITE is the last slot the firmware can place a packet.
2487   *
2488   * The queue is empty (no good data) if WRITE = READ - 1, and is full if
2489   * WRITE = READ.
2490   *
2491   * During initialization, the host sets up the READ queue position to the first
2492   * IDX position, and WRITE to the last (READ - 1 wrapped)
2493   *
2494   * When the firmware places a packet in a buffer, it will advance the READ idx
2495   * and fire the RX interrupt.  The driver can then query the READ idx and
2496   * process as many packets as possible, moving the WRITE idx forward as it
2497   * resets the Rx queue buffers with new memory.
2498   *
2499   * The management in the driver is as follows:
2500   * + A list of pre-allocated SKBs is stored in il->rxq->rx_free.  When
2501   *   il->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
2502   *   to replenish the il->rxq->rx_free.
2503   * + In il_rx_replenish (scheduled) if 'processed' != 'read' then the
2504   *   il->rxq is replenished and the READ IDX is updated (updating the
2505   *   'processed' and 'read' driver idxes as well)
2506   * + A received packet is processed and handed to the kernel network stack,
2507   *   detached from the il->rxq.  The driver 'processed' idx is updated.
2508   * + The Host/Firmware il->rxq is replenished at tasklet time from the rx_free
2509   *   list. If there are no allocated buffers in il->rxq->rx_free, the READ
2510   *   IDX is not incremented and il->status(RX_STALLED) is set.  If there
2511   *   were enough free buffers and RX_STALLED is set it is cleared.
2512   *
2513   *
2514   * Driver sequence:
2515   *
2516   * il_rx_queue_alloc()   Allocates rx_free
2517   * il_rx_replenish()     Replenishes rx_free list from rx_used, and calls
2518   *                            il_rx_queue_restock
2519   * il_rx_queue_restock() Moves available buffers from rx_free into Rx
2520   *                            queue, updates firmware pointers, and updates
2521   *                            the WRITE idx.  If insufficient rx_free buffers
2522   *                            are available, schedules il_rx_replenish
2523   *
2524   * -- enable interrupts --
2525   * ISR - il_rx()         Detach il_rx_bufs from pool up to the
2526   *                            READ IDX, detaching the SKB from the pool.
2527   *                            Moves the packet buffer from queue to rx_used.
2528   *                            Calls il_rx_queue_restock to refill any empty
2529   *                            slots.
2530   * ...
2531   *
2532   */
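
/*
 * Worked example of the READ/WRITE rules above, assuming RX_QUEUE_SIZE
 * is 256 (illustrative): with READ = 10 and WRITE = 9 the queue is empty
 * (WRITE == READ - 1, the driver has consumed everything); with
 * READ == WRITE == 10 the queue is full, and the firmware must not place
 * another packet until the driver restocks buffers and advances WRITE.
 */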
2533  
2534  /*
2535   * il_rx_queue_space - Return number of free slots available in queue.
2536   */
2537  int
2538  il_rx_queue_space(const struct il_rx_queue *q)
2539  {
2540  	int s = q->read - q->write;
2541  	if (s <= 0)
2542  		s += RX_QUEUE_SIZE;
2543  	/* keep some buffer to not confuse full and empty queue */
2544  	s -= 2;
2545  	if (s < 0)
2546  		s = 0;
2547  	return s;
2548  }
2549  EXPORT_SYMBOL(il_rx_queue_space);
2550  
2551  /*
2552   * il_rx_queue_update_write_ptr - Update the write pointer for the RX queue
2553   */
2554  void
2555  il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q)
2556  {
2557  	unsigned long flags;
2558  	u32 rx_wrt_ptr_reg = il->hw_params.rx_wrt_ptr_reg;
2559  	u32 reg;
2560  
2561  	spin_lock_irqsave(&q->lock, flags);
2562  
2563  	if (q->need_update == 0)
2564  		goto exit_unlock;
2565  
2566  	/* If power-saving is in use, make sure device is awake */
2567  	if (test_bit(S_POWER_PMI, &il->status)) {
2568  		reg = _il_rd(il, CSR_UCODE_DRV_GP1);
2569  
2570  		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
2571  			D_INFO("Rx queue requesting wakeup," " GP1 = 0x%x\n",
2572  			       reg);
2573  			il_set_bit(il, CSR_GP_CNTRL,
2574  				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2575  			goto exit_unlock;
2576  		}
2577  
2578  		q->write_actual = (q->write & ~0x7);
2579  		il_wr(il, rx_wrt_ptr_reg, q->write_actual);
2580  
2581  		/* Else device is assumed to be awake */
2582  	} else {
2583  		/* Device expects a multiple of 8 */
2584  		q->write_actual = (q->write & ~0x7);
2585  		il_wr(il, rx_wrt_ptr_reg, q->write_actual);
2586  	}
2587  
2588  	q->need_update = 0;
2589  
2590  exit_unlock:
2591  	spin_unlock_irqrestore(&q->lock, flags);
2592  }
2593  EXPORT_SYMBOL(il_rx_queue_update_write_ptr);
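
/*
 * Numeric example of the alignment above (illustrative): the device
 * expects the write pointer as a multiple of 8, so for q->write == 13
 * the value written to the register is q->write_actual == 8; the
 * remaining slots are handed over on a later update, once q->write
 * crosses the next multiple of 8.
 */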
2594  
2595  int
2596  il_rx_queue_alloc(struct il_priv *il)
2597  {
2598  	struct il_rx_queue *rxq = &il->rxq;
2599  	struct device *dev = &il->pci_dev->dev;
2600  	int i;
2601  
2602  	spin_lock_init(&rxq->lock);
2603  	INIT_LIST_HEAD(&rxq->rx_free);
2604  	INIT_LIST_HEAD(&rxq->rx_used);
2605  
2606  	/* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
2607  	rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
2608  				     GFP_KERNEL);
2609  	if (!rxq->bd)
2610  		goto err_bd;
2611  
2612  	rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct il_rb_status),
2613  					  &rxq->rb_stts_dma, GFP_KERNEL);
2614  	if (!rxq->rb_stts)
2615  		goto err_rb;
2616  
2617  	/* Fill the rx_used queue with _all_ of the Rx buffers */
2618  	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
2619  		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
2620  
2621  	/* Set us so that we have processed and used all buffers, but have
2622  	 * not restocked the Rx queue with fresh buffers */
2623  	rxq->read = rxq->write = 0;
2624  	rxq->write_actual = 0;
2625  	rxq->free_count = 0;
2626  	rxq->need_update = 0;
2627  	return 0;
2628  
2629  err_rb:
2630  	dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
2631  			  rxq->bd_dma);
2632  err_bd:
2633  	return -ENOMEM;
2634  }
2635  EXPORT_SYMBOL(il_rx_queue_alloc);
2636  
2637  void
2638  il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb)
2639  {
2640  	struct il_rx_pkt *pkt = rxb_addr(rxb);
2641  	struct il_spectrum_notification *report = &(pkt->u.spectrum_notif);
2642  
2643  	if (!report->state) {
2644  		D_11H("Spectrum Measure Notification: Start\n");
2645  		return;
2646  	}
2647  
2648  	memcpy(&il->measure_report, report, sizeof(*report));
2649  	il->measurement_status |= MEASUREMENT_READY;
2650  }
2651  EXPORT_SYMBOL(il_hdl_spectrum_measurement);
2652  
2653  /*
2654   * returns non-zero if packet should be dropped
2655   */
2656  int
2657  il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
2658  		      u32 decrypt_res, struct ieee80211_rx_status *stats)
2659  {
2660  	u16 fc = le16_to_cpu(hdr->frame_control);
2661  
2662  	/*
2663  	 * All contexts have the same setting here due to it being
2664  	 * a module parameter, so OK to check any context.
2665  	 */
2666  	if (il->active.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
2667  		return 0;
2668  
2669  	if (!(fc & IEEE80211_FCTL_PROTECTED))
2670  		return 0;
2671  
2672  	D_RX("decrypt_res:0x%x\n", decrypt_res);
2673  	switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
2674  	case RX_RES_STATUS_SEC_TYPE_TKIP:
2675  		/* The uCode got a bad phase 1 key and pushes the packet up
2676  		 * anyway; decryption will be done in SW. */
2677  		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2678  		    RX_RES_STATUS_BAD_KEY_TTAK)
2679  			break;
2680  		fallthrough;
2681  
2682  	case RX_RES_STATUS_SEC_TYPE_WEP:
2683  		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2684  		    RX_RES_STATUS_BAD_ICV_MIC) {
2685  			/* bad ICV, the packet is destroyed since the
2686  			 * decryption is inplace, drop it */
2687  			D_RX("Packet destroyed\n");
2688  			return -1;
2689  		}
2690  		fallthrough;
2691  	case RX_RES_STATUS_SEC_TYPE_CCMP:
2692  		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2693  		    RX_RES_STATUS_DECRYPT_OK) {
2694  			D_RX("hw decrypt successfully!!!\n");
2695  			stats->flag |= RX_FLAG_DECRYPTED;
2696  		}
2697  		break;
2698  
2699  	default:
2700  		break;
2701  	}
2702  	return 0;
2703  }
2704  EXPORT_SYMBOL(il_set_decrypted_flag);
2705  
2706  /*
2707   * il_txq_update_write_ptr - Send new write idx to hardware
2708   */
2709  void
2710  il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq)
2711  {
2712  	u32 reg = 0;
2713  	int txq_id = txq->q.id;
2714  
2715  	if (txq->need_update == 0)
2716  		return;
2717  
2718  	/* if we're trying to save power */
2719  	if (test_bit(S_POWER_PMI, &il->status)) {
2720  		/* wake up nic if it's powered down ...
2721  		 * uCode will wake up, and interrupt us again, so next
2722  		 * time we'll skip this part. */
2723  		reg = _il_rd(il, CSR_UCODE_DRV_GP1);
2724  
2725  		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
2726  			D_INFO("Tx queue %d requesting wakeup," " GP1 = 0x%x\n",
2727  			       txq_id, reg);
2728  			il_set_bit(il, CSR_GP_CNTRL,
2729  				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2730  			return;
2731  		}
2732  
2733  		il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
2734  
2735  		/*
2736  		 * else not in power-save mode,
2737  		 * uCode will never sleep when we're
2738  		 * trying to tx (during RFKILL, we're not trying to tx).
2739  		 */
2740  	} else
2741  		_il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
2742  	txq->need_update = 0;
2743  }
2744  EXPORT_SYMBOL(il_txq_update_write_ptr);
2745  
2746  /*
2747   * il_tx_queue_unmap -  Unmap any remaining DMA mappings and free skb's
2748   */
2749  void
2750  il_tx_queue_unmap(struct il_priv *il, int txq_id)
2751  {
2752  	struct il_tx_queue *txq = &il->txq[txq_id];
2753  	struct il_queue *q = &txq->q;
2754  
2755  	if (q->n_bd == 0)
2756  		return;
2757  
2758  	while (q->write_ptr != q->read_ptr) {
2759  		il->ops->txq_free_tfd(il, txq);
2760  		q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
2761  	}
2762  }
2763  EXPORT_SYMBOL(il_tx_queue_unmap);
2764  
2765  /*
2766   * il_tx_queue_free - Deallocate DMA queue.
2767   * @txq: Transmit queue to deallocate.
2768   *
2769   * Empty queue by removing and destroying all BD's.
2770   * Free all buffers.
2771   * 0-fill, but do not free "txq" descriptor structure.
2772   */
2773  void
2774  il_tx_queue_free(struct il_priv *il, int txq_id)
2775  {
2776  	struct il_tx_queue *txq = &il->txq[txq_id];
2777  	struct device *dev = &il->pci_dev->dev;
2778  	int i;
2779  
2780  	il_tx_queue_unmap(il, txq_id);
2781  
2782  	/* De-alloc array of command/tx buffers */
2783  	if (txq->cmd) {
2784  		for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
2785  			kfree(txq->cmd[i]);
2786  	}
2787  
2788  	/* De-alloc circular buffer of TFDs */
2789  	if (txq->q.n_bd)
2790  		dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
2791  				  txq->tfds, txq->q.dma_addr);
2792  
2793  	/* De-alloc array of per-TFD driver data */
2794  	kfree(txq->skbs);
2795  	txq->skbs = NULL;
2796  
2797  	/* deallocate arrays */
2798  	kfree(txq->cmd);
2799  	kfree(txq->meta);
2800  	txq->cmd = NULL;
2801  	txq->meta = NULL;
2802  
2803  	/* 0-fill queue descriptor structure */
2804  	memset(txq, 0, sizeof(*txq));
2805  }
2806  EXPORT_SYMBOL(il_tx_queue_free);
2807  
2808  /*
2809   * il_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
2810   */
2811  void
2812  il_cmd_queue_unmap(struct il_priv *il)
2813  {
2814  	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
2815  	struct il_queue *q = &txq->q;
2816  	int i;
2817  
2818  	if (q->n_bd == 0)
2819  		return;
2820  
2821  	while (q->read_ptr != q->write_ptr) {
2822  		i = il_get_cmd_idx(q, q->read_ptr, 0);
2823  
2824  		if (txq->meta[i].flags & CMD_MAPPED) {
2825  			dma_unmap_single(&il->pci_dev->dev,
2826  					 dma_unmap_addr(&txq->meta[i], mapping),
2827  					 dma_unmap_len(&txq->meta[i], len),
2828  					 DMA_BIDIRECTIONAL);
2829  			txq->meta[i].flags = 0;
2830  		}
2831  
2832  		q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
2833  	}
2834  
2835  	i = q->n_win;
2836  	if (txq->meta[i].flags & CMD_MAPPED) {
2837  		dma_unmap_single(&il->pci_dev->dev,
2838  				 dma_unmap_addr(&txq->meta[i], mapping),
2839  				 dma_unmap_len(&txq->meta[i], len),
2840  				 DMA_BIDIRECTIONAL);
2841  		txq->meta[i].flags = 0;
2842  	}
2843  }
2844  EXPORT_SYMBOL(il_cmd_queue_unmap);
2845  
2846  /*
2847   * il_cmd_queue_free - Deallocate DMA queue.
2848   *
2849   * Empty queue by removing and destroying all BD's.
2850   * Free all buffers.
2851   * 0-fill, but do not free "txq" descriptor structure.
2852   */
2853  void
2854  il_cmd_queue_free(struct il_priv *il)
2855  {
2856  	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
2857  	struct device *dev = &il->pci_dev->dev;
2858  	int i;
2859  
2860  	il_cmd_queue_unmap(il);
2861  
2862  	/* De-alloc array of command/tx buffers */
2863  	if (txq->cmd) {
2864  		for (i = 0; i <= TFD_CMD_SLOTS; i++)
2865  			kfree(txq->cmd[i]);
2866  	}
2867  
2868  	/* De-alloc circular buffer of TFDs */
2869  	if (txq->q.n_bd)
2870  		dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
2871  				  txq->tfds, txq->q.dma_addr);
2872  
2873  	/* deallocate arrays */
2874  	kfree(txq->cmd);
2875  	kfree(txq->meta);
2876  	txq->cmd = NULL;
2877  	txq->meta = NULL;
2878  
2879  	/* 0-fill queue descriptor structure */
2880  	memset(txq, 0, sizeof(*txq));
2881  }
2882  EXPORT_SYMBOL(il_cmd_queue_free);
2883  
2884  /*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
2885   * DMA services
2886   *
2887   * Theory of operation
2888   *
2889   * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
2890   * of buffer descriptors, each of which points to one or more data buffers for
2891   * the device to read from or fill.  Driver and device exchange status of each
2892   * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
2893   * entries in each circular buffer, to protect against confusing empty and full
2894   * queue states.
2895   *
2896   * The device reads or writes the data in the queues via the device's several
2897   * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
2898   *
2899   * For Tx queues, there are low mark and high mark limits. If, after queuing
2900   * a packet for Tx, free space becomes < low mark, the Tx queue is stopped.
2901   * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
2902   * > high mark, the Tx queue is resumed.
2903   *
2904   * See more detailed info in 4965.h.
2905   ***************************************************/
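
/*
 * Worked example of the low/high-mark hysteresis above, assuming
 * TFD_TX_CMD_SLOTS is 256 (an assumption about the header constant):
 * il_queue_init() below computes low_mark = 256 / 4 = 64 and
 * high_mark = 256 / 8 = 32, so a queue is stopped when free space drops
 * past one threshold and only woken again once reclaim pushes it back
 * past the other, rather than toggling on every packet.
 */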
2906  
2907  int
2908  il_queue_space(const struct il_queue *q)
2909  {
2910  	int s = q->read_ptr - q->write_ptr;
2911  
2912  	if (q->read_ptr > q->write_ptr)
2913  		s -= q->n_bd;
2914  
2915  	if (s <= 0)
2916  		s += q->n_win;
2917  	/* keep some reserve to not confuse empty and full situations */
2918  	s -= 2;
2919  	if (s < 0)
2920  		s = 0;
2921  	return s;
2922  }
2923  EXPORT_SYMBOL(il_queue_space);
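
/*
 * Numeric example of il_queue_space() (illustrative values): with
 * n_bd = 256, n_win = 64, read_ptr = 10 and write_ptr = 20,
 * s = 10 - 20 = -10, then s += n_win gives 54, and the 2-slot reserve
 * leaves 52 free slots.  In the wrapped case read_ptr = 250,
 * write_ptr = 5 gives s = 245 - 256 = -11, then s += 64 gives 53,
 * minus 2 leaves 51.
 */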
2924  
2925  
2926  /*
2927   * il_queue_init - Initialize queue's high/low-water and read/write idxes
2928   */
2929  static int
2930  il_queue_init(struct il_priv *il, struct il_queue *q, int slots, u32 id)
2931  {
2932  	/*
2933  	 * TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
2934  	 * il_queue_inc_wrap and il_queue_dec_wrap are broken.
2935  	 */
2936  	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
2937  	/* FIXME: remove q->n_bd */
2938  	q->n_bd = TFD_QUEUE_SIZE_MAX;
2939  
2940  	q->n_win = slots;
2941  	q->id = id;
2942  
2943  	/* slots must be power-of-two size, otherwise
2944  	 * il_get_cmd_idx is broken. */
2945  	BUG_ON(!is_power_of_2(slots));
2946  
2947  	q->low_mark = q->n_win / 4;
2948  	if (q->low_mark < 4)
2949  		q->low_mark = 4;
2950  
2951  	q->high_mark = q->n_win / 8;
2952  	if (q->high_mark < 2)
2953  		q->high_mark = 2;
2954  
2955  	q->write_ptr = q->read_ptr = 0;
2956  
2957  	return 0;
2958  }
2959  
2960  /*
2961   * il_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
2962   */
2963  static int
2964  il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id)
2965  {
2966  	struct device *dev = &il->pci_dev->dev;
2967  	size_t tfd_sz = il->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
2968  
2969  	/* Driver private data, only for Tx (not command) queues,
2970  	 * not shared with device. */
2971  	if (id != il->cmd_queue) {
2972  		txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX,
2973  				    sizeof(struct sk_buff *),
2974  				    GFP_KERNEL);
2975  		if (!txq->skbs) {
2976  			IL_ERR("Fail to alloc skbs\n");
2977  			goto error;
2978  		}
2979  	} else
2980  		txq->skbs = NULL;
2981  
2982  	/* Circular buffer of transmit frame descriptors (TFDs),
2983  	 * shared with device */
2984  	txq->tfds =
2985  	    dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL);
2986  	if (!txq->tfds)
2987  		goto error;
2988  
2989  	txq->q.id = id;
2990  
2991  	return 0;
2992  
2993  error:
2994  	kfree(txq->skbs);
2995  	txq->skbs = NULL;
2996  
2997  	return -ENOMEM;
2998  }
2999  
3000  /*
3001   * il_tx_queue_init - Allocate and initialize one tx/cmd queue
3002   */
3003  int
3004  il_tx_queue_init(struct il_priv *il, u32 txq_id)
3005  {
3006  	int i, len, ret;
3007  	int slots, actual_slots;
3008  	struct il_tx_queue *txq = &il->txq[txq_id];
3009  
3010  	/*
3011  	 * Alloc buffer array for commands (Tx or other types of commands).
3012  	 * For the command queue (#4/#9), allocate command space + one big
3013  	 * command for scan, since the scan command is very large; the system
3014  	 * will not have two scans at the same time, so only one is needed.
3015  	 * For normal Tx queues (all other queues), no super-size command
3016  	 * space is needed.
3017  	 */
3018  	if (txq_id == il->cmd_queue) {
3019  		slots = TFD_CMD_SLOTS;
3020  		actual_slots = slots + 1;
3021  	} else {
3022  		slots = TFD_TX_CMD_SLOTS;
3023  		actual_slots = slots;
3024  	}
3025  
3026  	txq->meta =
3027  	    kcalloc(actual_slots, sizeof(struct il_cmd_meta), GFP_KERNEL);
3028  	txq->cmd =
3029  	    kcalloc(actual_slots, sizeof(struct il_device_cmd *), GFP_KERNEL);
3030  
3031  	if (!txq->meta || !txq->cmd)
3032  		goto out_free_arrays;
3033  
3034  	len = sizeof(struct il_device_cmd);
3035  	for (i = 0; i < actual_slots; i++) {
3036  		/* only happens for cmd queue */
3037  		if (i == slots)
3038  			len = IL_MAX_CMD_SIZE;
3039  
3040  		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
3041  		if (!txq->cmd[i])
3042  			goto err;
3043  	}
3044  
3045  	/* Alloc driver data array and TFD circular buffer */
3046  	ret = il_tx_queue_alloc(il, txq, txq_id);
3047  	if (ret)
3048  		goto err;
3049  
3050  	txq->need_update = 0;
3051  
3052  	/*
3053  	 * For the default queues 0-3, set up the swq_id
3054  	 * already -- all others need to get one later
3055  	 * (if they need one at all).
3056  	 */
3057  	if (txq_id < 4)
3058  		il_set_swq_id(txq, txq_id, txq_id);
3059  
3060  	/* Initialize queue's high/low-water marks, and head/tail idxes */
3061  	il_queue_init(il, &txq->q, slots, txq_id);
3062  
3063  	/* Tell device where to find queue */
3064  	il->ops->txq_init(il, txq);
3065  
3066  	return 0;
3067  err:
3068  	for (i = 0; i < actual_slots; i++)
3069  		kfree(txq->cmd[i]);
3070  out_free_arrays:
3071  	kfree(txq->meta);
3072  	txq->meta = NULL;
3073  	kfree(txq->cmd);
3074  	txq->cmd = NULL;
3075  
3076  	return -ENOMEM;
3077  }
3078  EXPORT_SYMBOL(il_tx_queue_init);
3079  
3080  void
3081  il_tx_queue_reset(struct il_priv *il, u32 txq_id)
3082  {
3083  	int slots, actual_slots;
3084  	struct il_tx_queue *txq = &il->txq[txq_id];
3085  
3086  	if (txq_id == il->cmd_queue) {
3087  		slots = TFD_CMD_SLOTS;
3088  		actual_slots = TFD_CMD_SLOTS + 1;
3089  	} else {
3090  		slots = TFD_TX_CMD_SLOTS;
3091  		actual_slots = TFD_TX_CMD_SLOTS;
3092  	}
3093  
3094  	memset(txq->meta, 0, sizeof(struct il_cmd_meta) * actual_slots);
3095  	txq->need_update = 0;
3096  
3097  	/* Initialize queue's high/low-water marks, and head/tail idxes */
3098  	il_queue_init(il, &txq->q, slots, txq_id);
3099  
3100  	/* Tell device where to find queue */
3101  	il->ops->txq_init(il, txq);
3102  }
3103  EXPORT_SYMBOL(il_tx_queue_reset);
3104  
3105  /*************** HOST COMMAND QUEUE FUNCTIONS   *****/
3106  
3107  /*
3108   * il_enqueue_hcmd - enqueue a uCode command
3109   * @il: device private data pointer
3110   * @cmd: a pointer to the ucode command structure
3111   *
3112   * The function returns < 0 values to indicate that the operation
3113   * failed. On success, it returns the idx (>= 0) of the command in the
3114   * command queue.
3115   */
3116  int
3117  il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
3118  {
3119  	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
3120  	struct il_queue *q = &txq->q;
3121  	struct il_device_cmd *out_cmd;
3122  	struct il_cmd_meta *out_meta;
3123  	dma_addr_t phys_addr;
3124  	unsigned long flags;
3125  	u8 *out_payload;
3126  	u32 idx;
3127  	u16 fix_size;
3128  
3129  	cmd->len = il->ops->get_hcmd_size(cmd->id, cmd->len);
3130  	fix_size = (u16) (cmd->len + sizeof(out_cmd->hdr));
3131  
3132  	/* If any of the command structures end up being larger than the
3133  	 * TFD_MAX_PAYLOAD_SIZE and it is sent as a 'small' command, then we
3134  	 * will need to increase the size of the TFD entries.
3135  	 * Also check that the command buffer does not exceed the size of
3136  	 * device_cmd and max_cmd_size. */
3137  	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
3138  	       !(cmd->flags & CMD_SIZE_HUGE));
3139  	BUG_ON(fix_size > IL_MAX_CMD_SIZE);
3140  
3141  	if (il_is_rfkill(il) || il_is_ctkill(il)) {
3142  		IL_WARN("Not sending command - %s KILL\n",
3143  			il_is_rfkill(il) ? "RF" : "CT");
3144  		return -EIO;
3145  	}
3146  
3147  	spin_lock_irqsave(&il->hcmd_lock, flags);
3148  
3149  	if (il_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
3150  		spin_unlock_irqrestore(&il->hcmd_lock, flags);
3151  
3152  		IL_ERR("Restarting adapter due to command queue full\n");
3153  		queue_work(il->workqueue, &il->restart);
3154  		return -ENOSPC;
3155  	}
3156  
3157  	idx = il_get_cmd_idx(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
3158  	out_cmd = txq->cmd[idx];
3159  	out_meta = &txq->meta[idx];
3160  
3161  	/* The payload is in the same place in regular and huge
3162  	 * command buffers, but we need to let the compiler know when
3163  	 * we're using a larger payload buffer to avoid "field-
3164  	 * spanning write" warnings at run-time for huge commands.
3165  	 */
3166  	if (cmd->flags & CMD_SIZE_HUGE)
3167  		out_payload = ((struct il_device_cmd_huge *)out_cmd)->cmd.payload;
3168  	else
3169  		out_payload = out_cmd->cmd.payload;
3170  
3171  	if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
3172  		spin_unlock_irqrestore(&il->hcmd_lock, flags);
3173  		return -ENOSPC;
3174  	}
3175  
3176  	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
3177  	out_meta->flags = cmd->flags | CMD_MAPPED;
3178  	if (cmd->flags & CMD_WANT_SKB)
3179  		out_meta->source = cmd;
3180  	if (cmd->flags & CMD_ASYNC)
3181  		out_meta->callback = cmd->callback;
3182  
3183  	out_cmd->hdr.cmd = cmd->id;
3184  	memcpy(out_payload, cmd->data, cmd->len);
3185  
3186  	/* At this point, the out_cmd now has all of the incoming cmd
3187  	 * information */
3188  
3189  	out_cmd->hdr.flags = 0;
3190  	out_cmd->hdr.sequence =
3191  	    cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(q->write_ptr));
3192  	if (cmd->flags & CMD_SIZE_HUGE)
3193  		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
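
	/*
	 * Sketch of the sequence-field layout built above, per the SEQ
	 * macros in commands.h: bits 0-7 hold the entry idx within the
	 * queue (IDX_TO_SEQ), bits 8-12 hold the queue number
	 * (QUEUE_TO_SEQ), and bit 14 (SEQ_HUGE_FRAME) marks a huge
	 * command.  il_tx_cmd_complete() decodes this with
	 * SEQ_TO_QUEUE() and SEQ_TO_IDX() to find the entry again.
	 */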
3194  
3195  #ifdef CONFIG_IWLEGACY_DEBUG
3196  	switch (out_cmd->hdr.cmd) {
3197  	case C_TX_LINK_QUALITY_CMD:
3198  	case C_SENSITIVITY:
3199  		D_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, "
3200  			  "%d bytes at %d[%d]:%d\n",
3201  			  il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
3202  			  le16_to_cpu(out_cmd->hdr.sequence), fix_size,
3203  			  q->write_ptr, idx, il->cmd_queue);
3204  		break;
3205  	default:
3206  		D_HC("Sending command %s (#%x), seq: 0x%04X, "
3207  		     "%d bytes at %d[%d]:%d\n",
3208  		     il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
3209  		     le16_to_cpu(out_cmd->hdr.sequence), fix_size, q->write_ptr,
3210  		     idx, il->cmd_queue);
3211  	}
3212  #endif
3213  
3214  	phys_addr = dma_map_single(&il->pci_dev->dev, &out_cmd->hdr, fix_size,
3215  				   DMA_BIDIRECTIONAL);
3216  	if (unlikely(dma_mapping_error(&il->pci_dev->dev, phys_addr))) {
3217  		idx = -ENOMEM;
3218  		goto out;
3219  	}
3220  	dma_unmap_addr_set(out_meta, mapping, phys_addr);
3221  	dma_unmap_len_set(out_meta, len, fix_size);
3222  
3223  	txq->need_update = 1;
3224  
3225  	if (il->ops->txq_update_byte_cnt_tbl)
3226  		/* Set up entry in queue's byte count circular buffer */
3227  		il->ops->txq_update_byte_cnt_tbl(il, txq, 0);
3228  
3229  	il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size, 1,
3230  					    U32_PAD(cmd->len));
3231  
3232  	/* Increment and update queue's write idx */
3233  	q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
3234  	il_txq_update_write_ptr(il, txq);
3235  
3236  out:
3237  	spin_unlock_irqrestore(&il->hcmd_lock, flags);
3238  	return idx;
3239  }
3240  
3241  /*
3242   * il_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
3243   *
3244   * When FW advances the 'R' idx, all entries between the old and new 'R'
3245   * idx need to be reclaimed. As a result, some free space forms. If there
3246   * is enough free space (> low mark), wake the stack that feeds us.
3247   */
3248  static void
3249  il_hcmd_queue_reclaim(struct il_priv *il, int txq_id, int idx, int cmd_idx)
3250  {
3251  	struct il_tx_queue *txq = &il->txq[txq_id];
3252  	struct il_queue *q = &txq->q;
3253  	int nfreed = 0;
3254  
3255  	if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
3256  		IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
3257  		       "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
3258  		       q->write_ptr, q->read_ptr);
3259  		return;
3260  	}
3261  
3262  	for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
3263  	     q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
3264  
3265  		if (nfreed++ > 0) {
3266  			IL_ERR("HCMD skipped: idx (%d) %d %d\n", idx,
3267  			       q->write_ptr, q->read_ptr);
3268  			queue_work(il->workqueue, &il->restart);
3269  		}
3270  
3271  	}
3272  }
3273  
3274  /*
3275   * il_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
3276   * @rxb: Rx buffer to reclaim
3277   *
3278   * If an Rx buffer has an async callback associated with it, the callback
3279   * will be executed.  The attached skb (if present) will only be freed
3280   * if the callback returns 1.
3281   */
3282  void
3283  il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb)
3284  {
3285  	struct il_rx_pkt *pkt = rxb_addr(rxb);
3286  	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
3287  	int txq_id = SEQ_TO_QUEUE(sequence);
3288  	int idx = SEQ_TO_IDX(sequence);
3289  	int cmd_idx;
3290  	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
3291  	struct il_device_cmd *cmd;
3292  	struct il_cmd_meta *meta;
3293  	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
3294  	unsigned long flags;
3295  
3296  	/* If a Tx command is being handled and it isn't in the actual
3297  	 * command queue then a command routing bug has been introduced
3298  	 * in the queue management code. */
3299  	if (WARN
3300  	    (txq_id != il->cmd_queue,
3301  	     "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
3302  	     txq_id, il->cmd_queue, sequence, il->txq[il->cmd_queue].q.read_ptr,
3303  	     il->txq[il->cmd_queue].q.write_ptr)) {
3304  		il_print_hex_error(il, pkt, 32);
3305  		return;
3306  	}
3307  
3308  	cmd_idx = il_get_cmd_idx(&txq->q, idx, huge);
3309  	cmd = txq->cmd[cmd_idx];
3310  	meta = &txq->meta[cmd_idx];
3311  
3312  	txq->time_stamp = jiffies;
3313  
3314  	dma_unmap_single(&il->pci_dev->dev, dma_unmap_addr(meta, mapping),
3315  			 dma_unmap_len(meta, len), DMA_BIDIRECTIONAL);
3316  
3317  	/* Input error checking is done when commands are added to queue. */
3318  	if (meta->flags & CMD_WANT_SKB) {
3319  		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
3320  		rxb->page = NULL;
3321  	} else if (meta->callback)
3322  		meta->callback(il, cmd, pkt);
3323  
3324  	spin_lock_irqsave(&il->hcmd_lock, flags);
3325  
3326  	il_hcmd_queue_reclaim(il, txq_id, idx, cmd_idx);
3327  
3328  	if (!(meta->flags & CMD_ASYNC)) {
3329  		clear_bit(S_HCMD_ACTIVE, &il->status);
3330  		D_INFO("Clearing HCMD_ACTIVE for command %s\n",
3331  		       il_get_cmd_string(cmd->hdr.cmd));
3332  		wake_up(&il->wait_command_queue);
3333  	}
3334  
3335  	/* Mark as unmapped */
3336  	meta->flags = 0;
3337  
3338  	spin_unlock_irqrestore(&il->hcmd_lock, flags);
3339  }
3340  EXPORT_SYMBOL(il_tx_cmd_complete);
3341  
3342  MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
3343  MODULE_VERSION(IWLWIFI_VERSION);
3344  MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
3345  MODULE_LICENSE("GPL");
3346  
3347  /*
3348   * If bt_coex_active is true, uCode will do kill/defer every time the
3349   * priority line is asserted (BT is sending signals on the priority line
3350   * in the PCIx).
3351   * If bt_coex_active is false, uCode will ignore BT activity and
3352   * perform normal operation.
3353   *
3354   * Users might experience transmit issues on some platforms due to this
3355   * WiFi/BT co-existence problem. The possible behaviors are:
3356   *   Able to scan and find all the available APs
3357   *   Not able to associate with any AP
3358   * On those platforms, WiFi communication can be restored by setting the
3359   * "bt_coex_active" module parameter to "false".
3360   *
3361   * default: bt_coex_active = true (BT_COEX_ENABLE)
3362   */
3363  static bool bt_coex_active = true;
3364  module_param(bt_coex_active, bool, 0444);
3365  MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
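
/*
 * Example (illustrative): on a platform showing the association failure
 * described above, coexistence can be disabled at module load time:
 *
 *	modprobe iwlegacy bt_coex_active=0
 *
 * or persistently via /etc/modprobe.d/:
 *
 *	options iwlegacy bt_coex_active=0
 *
 * The module name is shown for illustration; use whichever module
 * actually exposes this parameter on your system.
 */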
3366  
3367  u32 il_debug_level;
3368  EXPORT_SYMBOL(il_debug_level);
3369  
3370  const u8 il_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
3371  EXPORT_SYMBOL(il_bcast_addr);
3372  
3373  #define MAX_BIT_RATE_40_MHZ 150	/* Mbps */
3374  #define MAX_BIT_RATE_20_MHZ 72	/* Mbps */
3375  static void
3376  il_init_ht_hw_capab(const struct il_priv *il,
3377  		    struct ieee80211_sta_ht_cap *ht_info,
3378  		    enum nl80211_band band)
3379  {
3380  	u16 max_bit_rate = 0;
3381  	u8 rx_chains_num = il->hw_params.rx_chains_num;
3382  	u8 tx_chains_num = il->hw_params.tx_chains_num;
3383  
3384  	ht_info->cap = 0;
3385  	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
3386  
3387  	ht_info->ht_supported = true;
3388  
3389  	ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
3390  	max_bit_rate = MAX_BIT_RATE_20_MHZ;
3391  	if (il->hw_params.ht40_channel & BIT(band)) {
3392  		ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
3393  		ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
3394  		ht_info->mcs.rx_mask[4] = 0x01;
3395  		max_bit_rate = MAX_BIT_RATE_40_MHZ;
3396  	}
3397  
3398  	if (il->cfg->mod_params->amsdu_size_8K)
3399  		ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
3400  
3401  	ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
3402  	ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
3403  
3404  	ht_info->mcs.rx_mask[0] = 0xFF;
3405  	if (rx_chains_num >= 2)
3406  		ht_info->mcs.rx_mask[1] = 0xFF;
3407  	if (rx_chains_num >= 3)
3408  		ht_info->mcs.rx_mask[2] = 0xFF;
3409  
3410  	/* Highest supported Rx data rate */
3411  	max_bit_rate *= rx_chains_num;
3412  	WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
3413  	ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
3414  
3415  	/* Tx MCS capabilities */
3416  	ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
3417  	if (tx_chains_num != rx_chains_num) {
3418  		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
3419  		ht_info->mcs.tx_params |=
3420  		    ((tx_chains_num -
3421  		      1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
3422  	}
3423  }
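
/*
 * Worked example (illustrative numbers): with 2 Rx chains on a band where
 * HT40 is allowed, the code above sets mcs.rx_mask[0] = rx_mask[1] = 0xFF
 * (MCS 0-15) and mcs.rx_highest = 2 * 150 Mbps = 300 Mbps; on a 20 MHz-only
 * band the same two-chain setup yields 2 * 72 Mbps = 144 Mbps.
 */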
3424  
3425  /*
3426   * il_init_geos - Initialize mac80211's geo/channel info based on EEPROM data
3427   */
3428  int
3429  il_init_geos(struct il_priv *il)
3430  {
3431  	struct il_channel_info *ch;
3432  	struct ieee80211_supported_band *sband;
3433  	struct ieee80211_channel *channels;
3434  	struct ieee80211_channel *geo_ch;
3435  	struct ieee80211_rate *rates;
3436  	int i = 0;
3437  	s8 max_tx_power = 0;
3438  
3439  	if (il->bands[NL80211_BAND_2GHZ].n_bitrates ||
3440  	    il->bands[NL80211_BAND_5GHZ].n_bitrates) {
3441  		D_INFO("Geography modes already initialized.\n");
3442  		set_bit(S_GEO_CONFIGURED, &il->status);
3443  		return 0;
3444  	}
3445  
3446  	channels =
3447  	    kcalloc(il->channel_count, sizeof(struct ieee80211_channel),
3448  		    GFP_KERNEL);
3449  	if (!channels)
3450  		return -ENOMEM;
3451  
3452  	rates = kcalloc(RATE_COUNT_LEGACY, sizeof(*rates), GFP_KERNEL);
3453  	if (!rates) {
3454  		kfree(channels);
3455  		return -ENOMEM;
3456  	}
3457  
3458  	/* 5.2GHz channels start after the 2.4GHz channels */
3459  	sband = &il->bands[NL80211_BAND_5GHZ];
3460  	sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)];
3461  	/* just OFDM */
3462  	sband->bitrates = &rates[IL_FIRST_OFDM_RATE];
3463  	sband->n_bitrates = RATE_COUNT_LEGACY - IL_FIRST_OFDM_RATE;
3464  
3465  	if (il->cfg->sku & IL_SKU_N)
3466  		il_init_ht_hw_capab(il, &sband->ht_cap, NL80211_BAND_5GHZ);
3467  
3468  	sband = &il->bands[NL80211_BAND_2GHZ];
3469  	sband->channels = channels;
3470  	/* OFDM & CCK */
3471  	sband->bitrates = rates;
3472  	sband->n_bitrates = RATE_COUNT_LEGACY;
3473  
3474  	if (il->cfg->sku & IL_SKU_N)
3475  		il_init_ht_hw_capab(il, &sband->ht_cap, NL80211_BAND_2GHZ);
3476  
3477  	il->ieee_channels = channels;
3478  	il->ieee_rates = rates;
3479  
3480  	for (i = 0; i < il->channel_count; i++) {
3481  		ch = &il->channel_info[i];
3482  
3483  		if (!il_is_channel_valid(ch))
3484  			continue;
3485  
3486  		sband = &il->bands[ch->band];
3487  
3488  		geo_ch = &sband->channels[sband->n_channels++];
3489  
3490  		geo_ch->center_freq =
3491  		    ieee80211_channel_to_frequency(ch->channel, ch->band);
3492  		geo_ch->max_power = ch->max_power_avg;
3493  		geo_ch->max_antenna_gain = 0xff;
3494  		geo_ch->hw_value = ch->channel;
3495  
3496  		if (il_is_channel_valid(ch)) {
3497  			if (!(ch->flags & EEPROM_CHANNEL_IBSS))
3498  				geo_ch->flags |= IEEE80211_CHAN_NO_IR;
3499  
3500  			if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
3501  				geo_ch->flags |= IEEE80211_CHAN_NO_IR;
3502  
3503  			if (ch->flags & EEPROM_CHANNEL_RADAR)
3504  				geo_ch->flags |= IEEE80211_CHAN_RADAR;
3505  
3506  			geo_ch->flags |= ch->ht40_extension_channel;
3507  
3508  			if (ch->max_power_avg > max_tx_power)
3509  				max_tx_power = ch->max_power_avg;
3510  		} else {
3511  			geo_ch->flags |= IEEE80211_CHAN_DISABLED;
3512  		}
3513  
3514  		D_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n", ch->channel,
3515  		       geo_ch->center_freq,
3516  		       il_is_channel_a_band(ch) ? "5.2" : "2.4",
3517  		       geo_ch->flags & IEEE80211_CHAN_DISABLED ?
3518  		       "restricted" : "valid",
3519  		       geo_ch->flags);
3520  	}
3521  
3522  	il->tx_power_device_lmt = max_tx_power;
3523  	il->tx_power_user_lmt = max_tx_power;
3524  	il->tx_power_next = max_tx_power;
3525  
3526  	if (il->bands[NL80211_BAND_5GHZ].n_channels == 0 &&
3527  	    (il->cfg->sku & IL_SKU_A)) {
3528  		IL_INFO("Incorrectly detected BG card as ABG. "
3529  			"Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
3530  			il->pci_dev->device, il->pci_dev->subsystem_device);
3531  		il->cfg->sku &= ~IL_SKU_A;
3532  	}
3533  
3534  	IL_INFO("Tunable channels: %d 802.11bg, %d 802.11a channels\n",
3535  		il->bands[NL80211_BAND_2GHZ].n_channels,
3536  		il->bands[NL80211_BAND_5GHZ].n_channels);
3537  
3538  	set_bit(S_GEO_CONFIGURED, &il->status);
3539  
3540  	return 0;
3541  }
3542  EXPORT_SYMBOL(il_init_geos);
3543  
3544  /*
3545   * il_free_geos - undo allocations in il_init_geos
3546   */
3547  void
3548  il_free_geos(struct il_priv *il)
3549  {
3550  	kfree(il->ieee_channels);
3551  	kfree(il->ieee_rates);
3552  	clear_bit(S_GEO_CONFIGURED, &il->status);
3553  }
3554  EXPORT_SYMBOL(il_free_geos);
3555  
3556  static bool
3557  il_is_channel_extension(struct il_priv *il, enum nl80211_band band,
3558  			u16 channel, u8 extension_chan_offset)
3559  {
3560  	const struct il_channel_info *ch_info;
3561  
3562  	ch_info = il_get_channel_info(il, band, channel);
3563  	if (!il_is_channel_valid(ch_info))
3564  		return false;
3565  
3566  	if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
3567  		return !(ch_info->ht40_extension_channel &
3568  			 IEEE80211_CHAN_NO_HT40PLUS);
3569  	else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
3570  		return !(ch_info->ht40_extension_channel &
3571  			 IEEE80211_CHAN_NO_HT40MINUS);
3572  
3573  	return false;
3574  }
3575  
3576  bool
3577  il_is_ht40_tx_allowed(struct il_priv *il, struct ieee80211_sta_ht_cap *ht_cap)
3578  {
3579  	if (!il->ht.enabled || !il->ht.is_40mhz)
3580  		return false;
3581  
3582  	/*
3583  	 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 because
3584  	 * the bit will not be set in the pure 40 MHz case.
3585  	 */
3586  	if (ht_cap && !ht_cap->ht_supported)
3587  		return false;
3588  
3589  #ifdef CONFIG_IWLEGACY_DEBUGFS
3590  	if (il->disable_ht40)
3591  		return false;
3592  #endif
3593  
3594  	return il_is_channel_extension(il, il->band,
3595  				       le16_to_cpu(il->staging.channel),
3596  				       il->ht.extension_chan_offset);
3597  }
3598  EXPORT_SYMBOL(il_is_ht40_tx_allowed);
3599  
3600  static u16 noinline
3601  il_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
3602  {
3603  	u16 new_val;
3604  	u16 beacon_factor;
3605  
3606  	/*
3607  	 * If mac80211 hasn't given us a beacon interval, program
3608  	 * the default into the device.
3609  	 */
3610  	if (!beacon_val)
3611  		return DEFAULT_BEACON_INTERVAL;
3612  
3613  	/*
3614  	 * If the beacon interval we obtained from the peer
3615  	 * is too large, we'll have to wake up more often
3616  	 * (and in IBSS case, we'll beacon too much)
3617  	 *
3618  	 * For example, if max_beacon_val is 4096, and the
3619  	 * requested beacon interval is 7000, we'll have to
3620  	 * use 3500 to be able to wake up on the beacons.
3621  	 *
3622  	 * This could badly influence beacon detection stats.
3623  	 */
3624  
3625  	beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
3626  	new_val = beacon_val / beacon_factor;
3627  
3628  	if (!new_val)
3629  		new_val = max_beacon_val;
3630  
3631  	return new_val;
3632  }
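
/*
 * Worked example for the adjustment above: il_adjust_beacon_interval(7000, 4096)
 * gives beacon_factor = (7000 + 4096) / 4096 = 2 (integer division), so the
 * programmed interval becomes 7000 / 2 = 3500 and we wake up on every second
 * beacon of the peer.
 */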
3633  
3634  int
3635  il_send_rxon_timing(struct il_priv *il)
3636  {
3637  	u64 tsf;
3638  	s32 interval_tm, rem;
3639  	struct ieee80211_conf *conf = NULL;
3640  	u16 beacon_int;
3641  	struct ieee80211_vif *vif = il->vif;
3642  
3643  	conf = &il->hw->conf;
3644  
3645  	lockdep_assert_held(&il->mutex);
3646  
3647  	memset(&il->timing, 0, sizeof(struct il_rxon_time_cmd));
3648  
3649  	il->timing.timestamp = cpu_to_le64(il->timestamp);
3650  	il->timing.listen_interval = cpu_to_le16(conf->listen_interval);
3651  
3652  	beacon_int = vif ? vif->bss_conf.beacon_int : 0;
3653  
3654  	/*
3655  	 * TODO: For IBSS we need to get atim_win from mac80211,
3656  	 *       for now just always use 0
3657  	 */
3658  	il->timing.atim_win = 0;
3659  
3660  	beacon_int =
3661  	    il_adjust_beacon_interval(beacon_int,
3662  				      il->hw_params.max_beacon_itrvl *
3663  				      TIME_UNIT);
3664  	il->timing.beacon_interval = cpu_to_le16(beacon_int);
3665  
3666  	tsf = il->timestamp;	/* tsf is modified by do_div: copy it */
3667  	interval_tm = beacon_int * TIME_UNIT;
3668  	rem = do_div(tsf, interval_tm);
3669  	il->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
3670  
3671  	il->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ? : 1) : 1;
3672  
3673  	D_ASSOC("beacon interval %d beacon timer %d atim win %d\n",
3674  		le16_to_cpu(il->timing.beacon_interval),
3675  		le32_to_cpu(il->timing.beacon_init_val),
3676  		le16_to_cpu(il->timing.atim_win));
3677  
3678  	return il_send_cmd_pdu(il, C_RXON_TIMING, sizeof(il->timing),
3679  			       &il->timing);
3680  }
3681  EXPORT_SYMBOL(il_send_rxon_timing);
3682  
3683  void
3684  il_set_rxon_hwcrypto(struct il_priv *il, int hw_decrypt)
3685  {
3686  	struct il_rxon_cmd *rxon = &il->staging;
3687  
3688  	if (hw_decrypt)
3689  		rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
3690  	else
3691  		rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
3693  }
3694  EXPORT_SYMBOL(il_set_rxon_hwcrypto);
3695  
3696  /* validate RXON structure is valid */
3697  int
3698  il_check_rxon_cmd(struct il_priv *il)
3699  {
3700  	struct il_rxon_cmd *rxon = &il->staging;
3701  	bool error = false;
3702  
3703  	if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
3704  		if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
3705  			IL_WARN("check 2.4G: wrong narrow\n");
3706  			error = true;
3707  		}
3708  		if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
3709  			IL_WARN("check 2.4G: wrong radar\n");
3710  			error = true;
3711  		}
3712  	} else {
3713  		if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
3714  			IL_WARN("check 5.2G: not short slot!\n");
3715  			error = true;
3716  		}
3717  		if (rxon->flags & RXON_FLG_CCK_MSK) {
3718  			IL_WARN("check 5.2G: CCK!\n");
3719  			error = true;
3720  		}
3721  	}
3722  	if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
3723  		IL_WARN("mac/bssid mcast!\n");
3724  		error = true;
3725  	}
3726  
3727  	/* make sure basic rates 6Mbps and 1Mbps are supported */
3728  	if ((rxon->ofdm_basic_rates & RATE_6M_MASK) == 0 &&
3729  	    (rxon->cck_basic_rates & RATE_1M_MASK) == 0) {
3730  		IL_WARN("neither 1 nor 6 are basic\n");
3731  		error = true;
3732  	}
3733  
3734  	if (le16_to_cpu(rxon->assoc_id) > 2007) {
3735  		IL_WARN("aid > 2007\n");
3736  		error = true;
3737  	}
3738  
3739  	if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) ==
3740  	    (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
3741  		IL_WARN("CCK and short slot\n");
3742  		error = true;
3743  	}
3744  
3745  	if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) ==
3746  	    (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
3747  		IL_WARN("CCK and auto detect\n");
3748  		error = true;
3749  	}
3750  
3751  	if ((rxon->flags &
3752  	     (RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK)) ==
3753  	    RXON_FLG_TGG_PROTECT_MSK) {
3754  		IL_WARN("TGg but no auto-detect\n");
3755  		error = true;
3756  	}
3757  
3758  	if (error) {
3759  		IL_WARN("Tuning to channel %d\n", le16_to_cpu(rxon->channel));
3760  		IL_ERR("Invalid RXON\n");
3761  		return -EINVAL;
3762  	}
3765  	return 0;
3766  }
3767  EXPORT_SYMBOL(il_check_rxon_cmd);
3768  
3769  /*
3770   * il_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
3771   * @il: staging_rxon is compared to active_rxon
3772   *
3773   * If the RXON structure is changing enough to require a new tune,
3774   * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
3775   * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
3776   */
3777  int
3778  il_full_rxon_required(struct il_priv *il)
3779  {
3780  	const struct il_rxon_cmd *staging = &il->staging;
3781  	const struct il_rxon_cmd *active = &il->active;
3782  
3783  #define CHK(cond)							\
3784  	if ((cond)) {							\
3785  		D_INFO("need full RXON - " #cond "\n");	\
3786  		return 1;						\
3787  	}
3788  
3789  #define CHK_NEQ(c1, c2)						\
3790  	if ((c1) != (c2)) {					\
3791  		D_INFO("need full RXON - "	\
3792  			       #c1 " != " #c2 " - %d != %d\n",	\
3793  			       (c1), (c2));			\
3794  		return 1;					\
3795  	}
3796  
3797  	/* These items are only settable from the full RXON command */
3798  	CHK(!il_is_associated(il));
3799  	CHK(!ether_addr_equal_64bits(staging->bssid_addr, active->bssid_addr));
3800  	CHK(!ether_addr_equal_64bits(staging->node_addr, active->node_addr));
3801  	CHK(!ether_addr_equal_64bits(staging->wlap_bssid_addr,
3802  				     active->wlap_bssid_addr));
3803  	CHK_NEQ(staging->dev_type, active->dev_type);
3804  	CHK_NEQ(staging->channel, active->channel);
3805  	CHK_NEQ(staging->air_propagation, active->air_propagation);
3806  	CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
3807  		active->ofdm_ht_single_stream_basic_rates);
3808  	CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
3809  		active->ofdm_ht_dual_stream_basic_rates);
3810  	CHK_NEQ(staging->assoc_id, active->assoc_id);
3811  
3812  	/* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
3813  	 * be updated with the RXON_ASSOC command -- however only some
3814  	 * flag transitions are allowed using RXON_ASSOC */
3815  
3816  	/* Check if we are not switching bands */
3817  	CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
3818  		active->flags & RXON_FLG_BAND_24G_MSK);
3819  
3820  	/* Check if we are switching association toggle */
3821  	CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
3822  		active->filter_flags & RXON_FILTER_ASSOC_MSK);
3823  
3824  #undef CHK
3825  #undef CHK_NEQ
3826  
3827  	return 0;
3828  }
3829  EXPORT_SYMBOL(il_full_rxon_required);
3830  
3831  u8
3832  il_get_lowest_plcp(struct il_priv *il)
3833  {
3834  	/*
3835  	 * Assign the lowest rate -- should really get this from
3836  	 * the beacon skb from mac80211.
3837  	 */
3838  	if (il->staging.flags & RXON_FLG_BAND_24G_MSK)
3839  		return RATE_1M_PLCP;
3840  	else
3841  		return RATE_6M_PLCP;
3842  }
3843  EXPORT_SYMBOL(il_get_lowest_plcp);
3844  
3845  static void
3846  _il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf)
3847  {
3848  	struct il_rxon_cmd *rxon = &il->staging;
3849  
3850  	if (!il->ht.enabled) {
3851  		rxon->flags &=
3852  		    ~(RXON_FLG_CHANNEL_MODE_MSK |
3853  		      RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | RXON_FLG_HT40_PROT_MSK
3854  		      | RXON_FLG_HT_PROT_MSK);
3855  		return;
3856  	}
3857  
3858  	rxon->flags |=
3859  	    cpu_to_le32(il->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS);
3860  
3861  	/* Set up channel bandwidth:
3862  	 * 20 MHz only, 20/40 mixed, or pure 40 if HT40 is allowed */
3863  	/* clear the HT channel mode before setting the mode */
3864  	rxon->flags &=
3865  	    ~(RXON_FLG_CHANNEL_MODE_MSK | RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
3866  	if (il_is_ht40_tx_allowed(il, NULL)) {
3867  		/* pure ht40 */
3868  		if (il->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
3869  			rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
3870  			/* Note: control channel is opposite of extension channel */
3871  			switch (il->ht.extension_chan_offset) {
3872  			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
3873  				rxon->flags &=
3874  				    ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
3875  				break;
3876  			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
3877  				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
3878  				break;
3879  			}
3880  		} else {
3881  			/* Note: control channel is opposite of extension channel */
3882  			switch (il->ht.extension_chan_offset) {
3883  			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
3884  				rxon->flags &=
3885  				    ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
3886  				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
3887  				break;
3888  			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
3889  				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
3890  				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
3891  				break;
3892  			case IEEE80211_HT_PARAM_CHA_SEC_NONE:
3893  			default:
3894  				/* channel location only valid if in Mixed mode */
3895  				IL_ERR("invalid extension channel offset\n");
3896  				break;
3897  			}
3898  		}
3899  	} else {
3900  		rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
3901  	}
3902  
3903  	if (il->ops->set_rxon_chain)
3904  		il->ops->set_rxon_chain(il);
3905  
3906  	D_ASSOC("rxon flags 0x%X operation mode: 0x%X "
3907  		"extension channel offset 0x%x\n", le32_to_cpu(rxon->flags),
3908  		il->ht.protection, il->ht.extension_chan_offset);
3909  }
3910  
3911  void
3912  il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf)
3913  {
3914  	_il_set_rxon_ht(il, ht_conf);
3915  }
3916  EXPORT_SYMBOL(il_set_rxon_ht);
3917  
3918  /* Return valid, unused, channel for a passive scan to reset the RF */
3919  u8
3920  il_get_single_channel_number(struct il_priv *il, enum nl80211_band band)
3921  {
3922  	const struct il_channel_info *ch_info;
3923  	int i;
3924  	u8 channel = 0;
3925  	u8 min, max;
3926  
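	/*
	 * Note (layout assumption, see il_init_geos): il->channel_info[]
	 * lists the 2.4 GHz channels (at most 14 of them) first, followed
	 * by the 5 GHz channels, so the index split below selects the
	 * requested band.
	 */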
3927  	if (band == NL80211_BAND_5GHZ) {
3928  		min = 14;
3929  		max = il->channel_count;
3930  	} else {
3931  		min = 0;
3932  		max = 14;
3933  	}
3934  
3935  	for (i = min; i < max; i++) {
3936  		channel = il->channel_info[i].channel;
3937  		if (channel == le16_to_cpu(il->staging.channel))
3938  			continue;
3939  
3940  		ch_info = il_get_channel_info(il, band, channel);
3941  		if (il_is_channel_valid(ch_info))
3942  			break;
3943  	}
3944  
3945  	return channel;
3946  }
3947  EXPORT_SYMBOL(il_get_single_channel_number);
3948  
3949  /*
3950   * il_set_rxon_channel - Set the band and channel values in staging RXON
3951   * @ch: requested channel as a pointer to struct ieee80211_channel
3952   *
3953   * NOTE:  Does not commit to the hardware; it sets appropriate bit fields
3954   * in the staging RXON flag structure based on the ch->band
3955   */
3956  int
3957  il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch)
3958  {
3959  	enum nl80211_band band = ch->band;
3960  	u16 channel = ch->hw_value;
3961  
3962  	if (le16_to_cpu(il->staging.channel) == channel && il->band == band)
3963  		return 0;
3964  
3965  	il->staging.channel = cpu_to_le16(channel);
3966  	if (band == NL80211_BAND_5GHZ)
3967  		il->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
3968  	else
3969  		il->staging.flags |= RXON_FLG_BAND_24G_MSK;
3970  
3971  	il->band = band;
3972  
3973  	D_INFO("Staging channel set to %d [%d]\n", channel, band);
3974  
3975  	return 0;
3976  }
3977  EXPORT_SYMBOL(il_set_rxon_channel);
3978  
3979  void
3980  il_set_flags_for_band(struct il_priv *il, enum nl80211_band band,
3981  		      struct ieee80211_vif *vif)
3982  {
3983  	if (band == NL80211_BAND_5GHZ) {
3984  		il->staging.flags &=
3985  		    ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
3986  		      RXON_FLG_CCK_MSK);
3987  		il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
3988  	} else {
3989  		/* Copied from il_post_associate() */
3990  		if (vif && vif->bss_conf.use_short_slot)
3991  			il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
3992  		else
3993  			il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
3994  
3995  		il->staging.flags |= RXON_FLG_BAND_24G_MSK;
3996  		il->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
3997  		il->staging.flags &= ~RXON_FLG_CCK_MSK;
3998  	}
3999  }
4000  EXPORT_SYMBOL(il_set_flags_for_band);
4001  
4002  /*
4003   * initialize rxon structure with default values from eeprom
4004   */
4005  void
4006  il_connection_init_rx_config(struct il_priv *il)
4007  {
4008  	const struct il_channel_info *ch_info;
4009  
4010  	memset(&il->staging, 0, sizeof(il->staging));
4011  
4012  	switch (il->iw_mode) {
4013  	case NL80211_IFTYPE_UNSPECIFIED:
4014  		il->staging.dev_type = RXON_DEV_TYPE_ESS;
4015  		break;
4016  	case NL80211_IFTYPE_STATION:
4017  		il->staging.dev_type = RXON_DEV_TYPE_ESS;
4018  		il->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
4019  		break;
4020  	case NL80211_IFTYPE_ADHOC:
4021  		il->staging.dev_type = RXON_DEV_TYPE_IBSS;
4022  		il->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
4023  		il->staging.filter_flags =
4024  		    RXON_FILTER_BCON_AWARE_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
4025  		break;
4026  	default:
4027  		IL_ERR("Unsupported interface type %d\n", il->iw_mode);
4028  		return;
4029  	}
4030  
4031  #if 0
4032  	/* TODO:  Figure out when short_preamble would be set and cache from
4033  	 * that */
4034  	if (!hw_to_local(il->hw)->short_preamble)
4035  		il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
4036  	else
4037  		il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
4038  #endif
4039  
4040  	ch_info =
4041  	    il_get_channel_info(il, il->band, le16_to_cpu(il->active.channel));
4042  
4043  	if (!ch_info)
4044  		ch_info = &il->channel_info[0];
4045  
4046  	il->staging.channel = cpu_to_le16(ch_info->channel);
4047  	il->band = ch_info->band;
4048  
4049  	il_set_flags_for_band(il, il->band, il->vif);
4050  
4051  	il->staging.ofdm_basic_rates =
4052  	    (IL_OFDM_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
4053  	il->staging.cck_basic_rates =
4054  	    (IL_CCK_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;
4055  
4056  	/* clear both MIX and PURE40 mode flag */
4057  	il->staging.flags &=
4058  	    ~(RXON_FLG_CHANNEL_MODE_MIXED | RXON_FLG_CHANNEL_MODE_PURE_40);
4059  	if (il->vif)
4060  		memcpy(il->staging.node_addr, il->vif->addr, ETH_ALEN);
4061  
4062  	il->staging.ofdm_ht_single_stream_basic_rates = 0xff;
4063  	il->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
4064  }
4065  EXPORT_SYMBOL(il_connection_init_rx_config);
4066  
4067  void
4068  il_set_rate(struct il_priv *il)
4069  {
4070  	const struct ieee80211_supported_band *hw = NULL;
4071  	struct ieee80211_rate *rate;
4072  	int i;
4073  
4074  	hw = il_get_hw_mode(il, il->band);
4075  	if (!hw) {
4076  		IL_ERR("Failed to set rate: unable to get hw mode\n");
4077  		return;
4078  	}
4079  
4080  	il->active_rate = 0;
4081  
4082  	for (i = 0; i < hw->n_bitrates; i++) {
4083  		rate = &(hw->bitrates[i]);
4084  		if (rate->hw_value < RATE_COUNT_LEGACY)
4085  			il->active_rate |= (1 << rate->hw_value);
4086  	}
4087  
4088  	D_RATE("Set active_rate = %0x\n", il->active_rate);
4089  
4090  	il->staging.cck_basic_rates =
4091  	    (IL_CCK_BASIC_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;
4092  
4093  	il->staging.ofdm_basic_rates =
4094  	    (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
4095  }
4096  EXPORT_SYMBOL(il_set_rate);
4097  
4098  void
4099  il_chswitch_done(struct il_priv *il, bool is_success)
4100  {
4101  	if (test_bit(S_EXIT_PENDING, &il->status))
4102  		return;
4103  
4104  	if (test_and_clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
4105  		ieee80211_chswitch_done(il->vif, is_success, 0);
4106  }
4107  EXPORT_SYMBOL(il_chswitch_done);
4108  
4109  void
4110  il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb)
4111  {
4112  	struct il_rx_pkt *pkt = rxb_addr(rxb);
4113  	struct il_csa_notification *csa = &(pkt->u.csa_notif);
4114  	struct il_rxon_cmd *rxon = (void *)&il->active;
4115  
4116  	if (!test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
4117  		return;
4118  
4119  	if (!le32_to_cpu(csa->status) && csa->channel == il->switch_channel) {
4120  		rxon->channel = csa->channel;
4121  		il->staging.channel = csa->channel;
4122  		D_11H("CSA notif: channel %d\n", le16_to_cpu(csa->channel));
4123  		il_chswitch_done(il, true);
4124  	} else {
4125  		IL_ERR("CSA notif (fail) : channel %d\n",
4126  		       le16_to_cpu(csa->channel));
4127  		il_chswitch_done(il, false);
4128  	}
4129  }
4130  EXPORT_SYMBOL(il_hdl_csa);
4131  
4132  #ifdef CONFIG_IWLEGACY_DEBUG
4133  void
4134  il_print_rx_config_cmd(struct il_priv *il)
4135  {
4136  	struct il_rxon_cmd *rxon = &il->staging;
4137  
4138  	D_RADIO("RX CONFIG:\n");
4139  	il_print_hex_dump(il, IL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
4140  	D_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
4141  	D_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
4142  	D_RADIO("u32 filter_flags: 0x%08x\n", le32_to_cpu(rxon->filter_flags));
4143  	D_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
4144  	D_RADIO("u8 ofdm_basic_rates: 0x%02x\n", rxon->ofdm_basic_rates);
4145  	D_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
4146  	D_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr);
4147  	D_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
4148  	D_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
4149  }
4150  EXPORT_SYMBOL(il_print_rx_config_cmd);
4151  #endif
4152  /*
4153   * il_irq_handle_error - called for HW or SW error interrupt from card
4154   */
4155  void
4156  il_irq_handle_error(struct il_priv *il)
4157  {
4158  	/* Set the FW error flag -- cleared on il_down */
4159  	set_bit(S_FW_ERROR, &il->status);
4160  
4161  	/* Cancel currently queued command. */
4162  	clear_bit(S_HCMD_ACTIVE, &il->status);
4163  
4164  	IL_ERR("Loaded firmware version: %s\n", il->hw->wiphy->fw_version);
4165  
4166  	il->ops->dump_nic_error_log(il);
4167  	if (il->ops->dump_fh)
4168  		il->ops->dump_fh(il, NULL, false);
4169  #ifdef CONFIG_IWLEGACY_DEBUG
4170  	if (il_get_debug_level(il) & IL_DL_FW_ERRORS)
4171  		il_print_rx_config_cmd(il);
4172  #endif
4173  
4174  	wake_up(&il->wait_command_queue);
4175  
4176  	/* Keep the restart process from trying to send host
4177  	 * commands by clearing the INIT status bit */
4178  	clear_bit(S_READY, &il->status);
4179  
4180  	if (!test_bit(S_EXIT_PENDING, &il->status)) {
4181  		IL_DBG(IL_DL_FW_ERRORS,
4182  		       "Restarting adapter due to uCode error.\n");
4183  
4184  		if (il->cfg->mod_params->restart_fw)
4185  			queue_work(il->workqueue, &il->restart);
4186  	}
4187  }
4188  EXPORT_SYMBOL(il_irq_handle_error);
4189  
4190  static int
4191  _il_apm_stop_master(struct il_priv *il)
4192  {
4193  	int ret = 0;
4194  
4195  	/* stop device's busmaster DMA activity */
4196  	_il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
4197  
4198  	ret =
4199  	    _il_poll_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
4200  			 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
4201  	if (ret < 0)
4202  		IL_WARN("Master Disable Timed Out, 100 usec\n");
4203  
4204  	D_INFO("stop master\n");
4205  
4206  	return ret;
4207  }
4208  
4209  void
4210  _il_apm_stop(struct il_priv *il)
4211  {
4212  	lockdep_assert_held(&il->reg_lock);
4213  
4214  	D_INFO("Stop card, put in low power state\n");
4215  
4216  	/* Stop device's DMA activity */
4217  	_il_apm_stop_master(il);
4218  
4219  	/* Reset the entire device */
4220  	_il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
4221  
4222  	udelay(10);
4223  
4224  	/*
4225  	 * Clear "initialization complete" bit to move adapter from
4226  	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
4227  	 */
4228  	_il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
4229  }
4230  EXPORT_SYMBOL(_il_apm_stop);
4231  
4232  void
4233  il_apm_stop(struct il_priv *il)
4234  {
4235  	unsigned long flags;
4236  
4237  	spin_lock_irqsave(&il->reg_lock, flags);
4238  	_il_apm_stop(il);
4239  	spin_unlock_irqrestore(&il->reg_lock, flags);
4240  }
4241  EXPORT_SYMBOL(il_apm_stop);
4242  
4243  /*
4244   * Start up NIC's basic functionality after it has been reset
4245   * (e.g. after platform boot, or shutdown via il_apm_stop())
4246   * NOTE:  This does not load uCode nor start the embedded processor
4247   */
4248  int
4249  il_apm_init(struct il_priv *il)
4250  {
4251  	int ret = 0;
4252  	u16 lctl;
4253  
4254  	D_INFO("Init card's basic functions\n");
4255  
4256  	/*
4257  	 * Use "set_bit" below rather than "write", to preserve any hardware
4258  	 * bits already set by default after reset.
4259  	 */
4260  
4261  	/* Disable L0S exit timer (platform NMI Work/Around) */
4262  	il_set_bit(il, CSR_GIO_CHICKEN_BITS,
4263  		   CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
4264  
4265  	/*
4266  	 * Disable L0s without affecting L1;
4267  	 *  don't wait for ICH L0s (ICH bug W/A)
4268  	 */
4269  	il_set_bit(il, CSR_GIO_CHICKEN_BITS,
4270  		   CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
4271  
4272  	/* Set FH wait threshold to maximum (HW error during stress W/A) */
4273  	il_set_bit(il, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
4274  
4275  	/*
4276  	 * Enable HAP INTA (interrupt from management bus) to
4277  	 * wake device's PCI Express link L1a -> L0s
4278  	 * NOTE:  This is no-op for 3945 (non-existent bit)
4279  	 */
4280  	il_set_bit(il, CSR_HW_IF_CONFIG_REG,
4281  		   CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
4282  
4283  	/*
4284  	 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
4285  	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
4286  	 * If so (likely), disable L0S, so device moves directly L0->L1;
4287  	 *    costs negligible amount of power savings.
4288  	 * If not (unlikely), enable L0S, so there is at least some
4289  	 *    power savings, even without L1.
4290  	 */
4291  	if (il->cfg->set_l0s) {
4292  		ret = pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
4293  		if (!ret && (lctl & PCI_EXP_LNKCTL_ASPM_L1)) {
4294  			/* L1-ASPM enabled; disable(!) L0S  */
4295  			il_set_bit(il, CSR_GIO_REG,
4296  				   CSR_GIO_REG_VAL_L0S_ENABLED);
4297  			D_POWER("L1 Enabled; Disabling L0S\n");
4298  		} else {
4299  			/* L1-ASPM disabled; enable(!) L0S */
4300  			il_clear_bit(il, CSR_GIO_REG,
4301  				     CSR_GIO_REG_VAL_L0S_ENABLED);
4302  			D_POWER("L1 Disabled; Enabling L0S\n");
4303  		}
4304  	}
4305  
4306  	/* Configure analog phase-lock-loop before activating to D0A */
4307  	if (il->cfg->pll_cfg_val)
4308  		il_set_bit(il, CSR_ANA_PLL_CFG,
4309  			   il->cfg->pll_cfg_val);
4310  
4311  	/*
4312  	 * Set "initialization complete" bit to move adapter from
4313  	 * D0U* --> D0A* (powered-up active) state.
4314  	 */
4315  	il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
4316  
4317  	/*
4318  	 * Wait for clock stabilization; once stabilized, access to
4319  	 * device-internal resources is supported, e.g. il_wr_prph()
4320  	 * and accesses to uCode SRAM.
4321  	 */
4322  	ret =
4323  	    _il_poll_bit(il, CSR_GP_CNTRL,
4324  			 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
4325  			 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
4326  	if (ret < 0) {
4327  		D_INFO("Failed to init the card\n");
4328  		goto out;
4329  	}
4330  
4331  	/*
4332  	 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
4333  	 * BSM (Boostrap State Machine) is only in 3945 and 4965.
4334  	 *
4335  	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
4336  	 * do not disable clocks.  This preserves any hardware bits already
4337  	 * set by default in "CLK_CTRL_REG" after reset.
4338  	 */
4339  	if (il->cfg->use_bsm)
4340  		il_wr_prph(il, APMG_CLK_EN_REG,
4341  			   APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
4342  	else
4343  		il_wr_prph(il, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
4344  	udelay(20);
4345  
4346  	/* Disable L1-Active */
4347  	il_set_bits_prph(il, APMG_PCIDEV_STT_REG,
4348  			 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
4349  
4350  out:
4351  	return ret;
4352  }
4353  EXPORT_SYMBOL(il_apm_init);
4354  
4355  int
4356  il_set_tx_power(struct il_priv *il, s8 tx_power, bool force)
4357  {
4358  	int ret;
4359  	s8 prev_tx_power;
4360  	bool defer;
4361  
4362  	lockdep_assert_held(&il->mutex);
4363  
4364  	if (il->tx_power_user_lmt == tx_power && !force)
4365  		return 0;
4366  
4367  	if (!il->ops->send_tx_power)
4368  		return -EOPNOTSUPP;
4369  
4370  	/* 0 dBm means 1 milliwatt */
4371  	if (tx_power < 0) {
4372  		IL_WARN("Requested user TXPOWER %d below 1 mW.\n", tx_power);
4373  		return -EINVAL;
4374  	}
4375  
4376  	if (tx_power > il->tx_power_device_lmt) {
4377  		IL_WARN("Requested user TXPOWER %d above upper limit %d.\n",
4378  			tx_power, il->tx_power_device_lmt);
4379  		return -EINVAL;
4380  	}
4381  
4382  	if (!il_is_ready_rf(il))
4383  		return -EIO;
4384  
4385  	/* scan complete and commit_rxon use the tx_power_next value;
4386  	 * it always needs to be updated with the newest request */
4387  	il->tx_power_next = tx_power;
4388  
4389  	/* do not set tx power when scanning or channel changing */
4390  	defer = test_bit(S_SCANNING, &il->status) ||
4391  	    memcmp(&il->active, &il->staging, sizeof(il->staging));
4392  	if (defer && !force) {
4393  		D_INFO("Deferring tx power set\n");
4394  		return 0;
4395  	}
4396  
4397  	prev_tx_power = il->tx_power_user_lmt;
4398  	il->tx_power_user_lmt = tx_power;
4399  
4400  	ret = il->ops->send_tx_power(il);
4401  
4402  	/* if fail to set tx_power, restore the orig. tx power */
4403  	if (ret) {
4404  		il->tx_power_user_lmt = prev_tx_power;
4405  		il->tx_power_next = prev_tx_power;
4406  	}
4407  	return ret;
4408  }
4409  EXPORT_SYMBOL(il_set_tx_power);
4410  
4411  void
4412  il_send_bt_config(struct il_priv *il)
4413  {
4414  	struct il_bt_cmd bt_cmd = {
4415  		.lead_time = BT_LEAD_TIME_DEF,
4416  		.max_kill = BT_MAX_KILL_DEF,
4417  		.kill_ack_mask = 0,
4418  		.kill_cts_mask = 0,
4419  	};
4420  
4421  	if (!bt_coex_active)
4422  		bt_cmd.flags = BT_COEX_DISABLE;
4423  	else
4424  		bt_cmd.flags = BT_COEX_ENABLE;
4425  
4426  	D_INFO("BT coex %s\n",
4427  	       (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
4428  
4429  	if (il_send_cmd_pdu(il, C_BT_CONFIG, sizeof(struct il_bt_cmd), &bt_cmd))
4430  		IL_ERR("failed to send BT Coex Config\n");
4431  }
4432  EXPORT_SYMBOL(il_send_bt_config);
4433  
4434  int
4435  il_send_stats_request(struct il_priv *il, u8 flags, bool clear)
4436  {
4437  	struct il_stats_cmd stats_cmd = {
4438  		.configuration_flags = clear ? IL_STATS_CONF_CLEAR_STATS : 0,
4439  	};
4440  
4441  	if (flags & CMD_ASYNC)
4442  		return il_send_cmd_pdu_async(il, C_STATS, sizeof(struct il_stats_cmd),
4443  					     &stats_cmd, NULL);
4444  	else
4445  		return il_send_cmd_pdu(il, C_STATS, sizeof(struct il_stats_cmd),
4446  				       &stats_cmd);
4447  }
4448  EXPORT_SYMBOL(il_send_stats_request);
4449  
4450  void
4451  il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb)
4452  {
4453  #ifdef CONFIG_IWLEGACY_DEBUG
4454  	struct il_rx_pkt *pkt = rxb_addr(rxb);
4455  	struct il_sleep_notification *sleep = &(pkt->u.sleep_notif);
4456  	D_RX("sleep mode: %d, src: %d\n",
4457  	     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
4458  #endif
4459  }
4460  EXPORT_SYMBOL(il_hdl_pm_sleep);
4461  
4462  void
4463  il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb)
4464  {
4465  	struct il_rx_pkt *pkt = rxb_addr(rxb);
4466  	u32 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
4467  	D_RADIO("Dumping %d bytes of unhandled notification for %s:\n", len,
4468  		il_get_cmd_string(pkt->hdr.cmd));
4469  	il_print_hex_dump(il, IL_DL_RADIO, pkt->u.raw, len);
4470  }
4471  EXPORT_SYMBOL(il_hdl_pm_debug_stats);
4472  
4473  void
4474  il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb)
4475  {
4476  	struct il_rx_pkt *pkt = rxb_addr(rxb);
4477  
4478  	IL_ERR("Error Reply type 0x%08X cmd %s (0x%02X) "
4479  	       "seq 0x%04X ser 0x%08X\n",
4480  	       le32_to_cpu(pkt->u.err_resp.error_type),
4481  	       il_get_cmd_string(pkt->u.err_resp.cmd_id),
4482  	       pkt->u.err_resp.cmd_id,
4483  	       le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
4484  	       le32_to_cpu(pkt->u.err_resp.error_info));
4485  }
4486  EXPORT_SYMBOL(il_hdl_error);
4487  
4488  void
4489  il_clear_isr_stats(struct il_priv *il)
4490  {
4491  	memset(&il->isr_stats, 0, sizeof(il->isr_stats));
4492  }
4493  
4494  int
4495  il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
4496  	       unsigned int link_id, u16 queue,
4497  	       const struct ieee80211_tx_queue_params *params)
4498  {
4499  	struct il_priv *il = hw->priv;
4500  	unsigned long flags;
4501  	int q;
4502  
4503  	D_MAC80211("enter\n");
4504  
4505  	if (!il_is_ready_rf(il)) {
4506  		D_MAC80211("leave - RF not ready\n");
4507  		return -EIO;
4508  	}
4509  
4510  	if (queue >= AC_NUM) {
4511  		D_MAC80211("leave - queue >= AC_NUM %d\n", queue);
4512  		return 0;
4513  	}
4514  
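	/*
	 * mac80211 numbers its queues from highest priority down
	 * (queue 0 = VO); the uCode QoS parameter table appears to be
	 * indexed the opposite way, hence the inversion below.
	 */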
4515  	q = AC_NUM - 1 - queue;
4516  
4517  	spin_lock_irqsave(&il->lock, flags);
4518  
4519  	il->qos_data.def_qos_parm.ac[q].cw_min =
4520  	    cpu_to_le16(params->cw_min);
4521  	il->qos_data.def_qos_parm.ac[q].cw_max =
4522  	    cpu_to_le16(params->cw_max);
4523  	il->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
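	/* mac80211 reports txop in units of 32 usec; the device expects usec */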
4524  	il->qos_data.def_qos_parm.ac[q].edca_txop =
4525  	    cpu_to_le16((params->txop * 32));
4526  
4527  	il->qos_data.def_qos_parm.ac[q].reserved1 = 0;
4528  
4529  	spin_unlock_irqrestore(&il->lock, flags);
4530  
4531  	D_MAC80211("leave\n");
4532  	return 0;
4533  }
4534  EXPORT_SYMBOL(il_mac_conf_tx);
4535  
4536  int
4537  il_mac_tx_last_beacon(struct ieee80211_hw *hw)
4538  {
4539  	struct il_priv *il = hw->priv;
4540  	int ret;
4541  
4542  	D_MAC80211("enter\n");
4543  
4544  	ret = (il->ibss_manager == IL_IBSS_MANAGER);
4545  
4546  	D_MAC80211("leave ret %d\n", ret);
4547  	return ret;
4548  }
4549  EXPORT_SYMBOL_GPL(il_mac_tx_last_beacon);
4550  
4551  static int
4552  il_set_mode(struct il_priv *il)
4553  {
4554  	il_connection_init_rx_config(il);
4555  
4556  	if (il->ops->set_rxon_chain)
4557  		il->ops->set_rxon_chain(il);
4558  
4559  	return il_commit_rxon(il);
4560  }
4561  
4562  int
4563  il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
4564  {
4565  	struct il_priv *il = hw->priv;
4566  	int err;
4567  	bool reset;
4568  
4569  	mutex_lock(&il->mutex);
4570  	D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);
4571  
4572  	if (!il_is_ready_rf(il)) {
4573  		IL_WARN("Tried to add an interface while the device is not ready\n");
4574  		err = -EINVAL;
4575  		goto out;
4576  	}
4577  
4578  	/*
4579  	 * We do not support multiple virtual interfaces, but on hardware reset
4580  	 * we have to add the same interface again.
4581  	 */
4582  	reset = (il->vif == vif);
4583  	if (il->vif && !reset) {
4584  		err = -EOPNOTSUPP;
4585  		goto out;
4586  	}
4587  
4588  	il->vif = vif;
4589  	il->iw_mode = vif->type;
4590  
4591  	err = il_set_mode(il);
4592  	if (err) {
4593  		IL_WARN("Failed to set mode %d\n", vif->type);
4594  		if (!reset) {
4595  			il->vif = NULL;
4596  			il->iw_mode = NL80211_IFTYPE_STATION;
4597  		}
4598  	}
4599  
4600  out:
4601  	D_MAC80211("leave err %d\n", err);
4602  	mutex_unlock(&il->mutex);
4603  
4604  	return err;
4605  }
4606  EXPORT_SYMBOL(il_mac_add_interface);
4607  
4608  static void
4609  il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif)
4610  {
4611  	lockdep_assert_held(&il->mutex);
4612  
4613  	if (il->scan_vif == vif) {
4614  		il_scan_cancel_timeout(il, 200);
4615  		il_force_scan_end(il);
4616  	}
4617  
4618  	il_set_mode(il);
4619  }
4620  
4621  void
4622  il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
4623  {
4624  	struct il_priv *il = hw->priv;
4625  
4626  	mutex_lock(&il->mutex);
4627  	D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);
4628  
4629  	WARN_ON(il->vif != vif);
4630  	il->vif = NULL;
4631  	il->iw_mode = NL80211_IFTYPE_UNSPECIFIED;
4632  	il_teardown_interface(il, vif);
4633  	eth_zero_addr(il->bssid);
4634  
4635  	D_MAC80211("leave\n");
4636  	mutex_unlock(&il->mutex);
4637  }
4638  EXPORT_SYMBOL(il_mac_remove_interface);
4639  
4640  int
4641  il_alloc_txq_mem(struct il_priv *il)
4642  {
4643  	if (!il->txq)
4644  		il->txq =
4645  		    kcalloc(il->cfg->num_of_queues,
4646  			    sizeof(struct il_tx_queue),
4647  			    GFP_KERNEL);
4648  	if (!il->txq) {
4649  		IL_ERR("Not enough memory for txq\n");
4650  		return -ENOMEM;
4651  	}
4652  	return 0;
4653  }
4654  EXPORT_SYMBOL(il_alloc_txq_mem);
4655  
4656  void
4657  il_free_txq_mem(struct il_priv *il)
4658  {
4659  	kfree(il->txq);
4660  	il->txq = NULL;
4661  }
4662  EXPORT_SYMBOL(il_free_txq_mem);
4663  
4664  int
4665  il_force_reset(struct il_priv *il, bool external)
4666  {
4667  	struct il_force_reset *force_reset;
4668  
4669  	if (test_bit(S_EXIT_PENDING, &il->status))
4670  		return -EINVAL;
4671  
4672  	force_reset = &il->force_reset;
4673  	force_reset->reset_request_count++;
4674  	if (!external) {
4675  		if (force_reset->last_force_reset_jiffies &&
4676  		    time_after(force_reset->last_force_reset_jiffies +
4677  			       force_reset->reset_duration, jiffies)) {
4678  			D_INFO("force reset rejected\n");
4679  			force_reset->reset_reject_count++;
4680  			return -EAGAIN;
4681  		}
4682  	}
4683  	force_reset->reset_success_count++;
4684  	force_reset->last_force_reset_jiffies = jiffies;
4685  
4686  	/*
4687  	 * If the request is external (e.g. from debugfs), always perform
4688  	 * the reset regardless of the module parameter setting.
4689  	 * If the request is internal (uCode error or driver-detected
4690  	 * failure), the fw_restart module parameter needs to be checked
4691  	 * before performing the firmware reload.
4692  	 */
4694  
4695  	if (!external && !il->cfg->mod_params->restart_fw) {
4696  		D_INFO("Cancel firmware reload based on "
4697  		       "module parameter setting\n");
4698  		return 0;
4699  	}
4700  
4701  	IL_ERR("On demand firmware reload\n");
4702  
4703  	/* Set the FW error flag -- cleared on il_down */
4704  	set_bit(S_FW_ERROR, &il->status);
4705  	wake_up(&il->wait_command_queue);
4706  	/*
4707  	 * Keep the restart process from trying to send host
4708  	 * commands by clearing the INIT status bit
4709  	 */
4710  	clear_bit(S_READY, &il->status);
4711  	queue_work(il->workqueue, &il->restart);
4712  
4713  	return 0;
4714  }
4715  EXPORT_SYMBOL(il_force_reset);
4716  
4717  int
4718  il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
4719  			enum nl80211_iftype newtype, bool newp2p)
4720  {
4721  	struct il_priv *il = hw->priv;
4722  	int err;
4723  
4724  	mutex_lock(&il->mutex);
4725  	D_MAC80211("enter: type %d, addr %pM newtype %d newp2p %d\n",
4726  		    vif->type, vif->addr, newtype, newp2p);
4727  
4728  	if (newp2p) {
4729  		err = -EOPNOTSUPP;
4730  		goto out;
4731  	}
4732  
4733  	if (!il->vif || !il_is_ready_rf(il)) {
4734  		/*
4735  		 * Huh? But wait ... this can maybe happen when
4736  		 * we're in the middle of a firmware restart!
4737  		 */
4738  		err = -EBUSY;
4739  		goto out;
4740  	}
4741  
4742  	/* success */
4743  	vif->type = newtype;
4744  	vif->p2p = false;
4745  	il->iw_mode = newtype;
4746  	il_teardown_interface(il, vif);
4747  	err = 0;
4748  
4749  out:
4750  	D_MAC80211("leave err %d\n", err);
4751  	mutex_unlock(&il->mutex);
4752  
4753  	return err;
4754  }
4755  EXPORT_SYMBOL(il_mac_change_interface);
4756  
4757  void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
4758  		  u32 queues, bool drop)
4759  {
4760  	struct il_priv *il = hw->priv;
4761  	unsigned long timeout = jiffies + msecs_to_jiffies(500);
4762  	int i;
4763  
4764  	mutex_lock(&il->mutex);
4765  	D_MAC80211("enter\n");
4766  
4767  	if (il->txq == NULL)
4768  		goto out;
4769  
4770  	for (i = 0; i < il->hw_params.max_txq_num; i++) {
4771  		struct il_queue *q;
4772  
4773  		if (i == il->cmd_queue)
4774  			continue;
4775  
4776  		q = &il->txq[i].q;
4777  		if (q->read_ptr == q->write_ptr)
4778  			continue;
4779  
4780  		if (time_after(jiffies, timeout)) {
4781  			IL_ERR("Failed to flush queue %d\n", q->id);
4782  			break;
4783  		}
4784  
4785  		msleep(20);
4786  	}
4787  out:
4788  	D_MAC80211("leave\n");
4789  	mutex_unlock(&il->mutex);
4790  }
4791  EXPORT_SYMBOL(il_mac_flush);
4792  
4793  /*
4794   * On every watchdog tick we check the (latest) time stamp. If it does not
4795   * change during the timeout period and the queue is not empty, we reset the firmware.
4796   */
4797  static int
4798  il_check_stuck_queue(struct il_priv *il, int cnt)
4799  {
4800  	struct il_tx_queue *txq = &il->txq[cnt];
4801  	struct il_queue *q = &txq->q;
4802  	unsigned long timeout;
4803  	unsigned long now = jiffies;
4804  	int ret;
4805  
4806  	if (q->read_ptr == q->write_ptr) {
4807  		txq->time_stamp = now;
4808  		return 0;
4809  	}
4810  
4811  	timeout =
4812  	    txq->time_stamp +
4813  	    msecs_to_jiffies(il->cfg->wd_timeout);
4814  
4815  	if (time_after(now, timeout)) {
4816  		IL_ERR("Queue %d stuck for %u ms.\n", q->id,
4817  		       jiffies_to_msecs(now - txq->time_stamp));
4818  		ret = il_force_reset(il, false);
4819  		return (ret == -EAGAIN) ? 0 : 1;
4820  	}
4821  
4822  	return 0;
4823  }
4824  
4825  /*
4826   * Making the watchdog tick a quarter of the timeout ensures we will
4827   * discover a hung queue between timeout and 1.25*timeout
4828   */
4829  #define IL_WD_TICK(timeout) ((timeout) / 4)
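
/*
 * Worked example: with, e.g., il->cfg->wd_timeout = 2000 ms the watchdog
 * fires every IL_WD_TICK(2000) = 500 ms, so a queue whose time stamp has
 * stopped advancing is declared stuck between 2000 ms and 2500 ms after
 * its last activity (i.e. between timeout and 1.25 * timeout).
 */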
4830  
4831  /*
4832   * Watchdog timer callback: we check each tx queue for a stuck condition;
4833   * if one is hung we reset the firmware. If everything is fine, just rearm the timer.
4834   */
4835  void
4836  il_bg_watchdog(struct timer_list *t)
4837  {
4838  	struct il_priv *il = from_timer(il, t, watchdog);
4839  	int cnt;
4840  	unsigned long timeout;
4841  
4842  	if (test_bit(S_EXIT_PENDING, &il->status))
4843  		return;
4844  
4845  	timeout = il->cfg->wd_timeout;
4846  	if (timeout == 0)
4847  		return;
4848  
4849  	/* monitor and check for stuck cmd queue */
4850  	if (il_check_stuck_queue(il, il->cmd_queue))
4851  		return;
4852  
4853  	/* monitor and check for other stuck queues */
4854  	for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
4855  		/* skip as we already checked the command queue */
4856  		if (cnt == il->cmd_queue)
4857  			continue;
4858  		if (il_check_stuck_queue(il, cnt))
4859  			return;
4860  	}
4861  
4862  	mod_timer(&il->watchdog,
4863  		  jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
4864  }
4865  EXPORT_SYMBOL(il_bg_watchdog);
4866  
4867  void
4868  il_setup_watchdog(struct il_priv *il)
4869  {
4870  	unsigned int timeout = il->cfg->wd_timeout;
4871  
4872  	if (timeout)
4873  		mod_timer(&il->watchdog,
4874  			  jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
4875  	else
4876  		del_timer(&il->watchdog);
4877  }
4878  EXPORT_SYMBOL(il_setup_watchdog);
4879  
4880  /*
4881   * extended beacon time format:
4882   * a time in usec is converted into a 32-bit value in extended:internal format;
4883   * the extended part is the beacon count,
4884   * the internal part is the time in usec within one beacon interval
4885   */
4886  u32
4887  il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval)
4888  {
4889  	u32 quot;
4890  	u32 rem;
4891  	u32 interval = beacon_interval * TIME_UNIT;
4892  
4893  	if (!interval || !usec)
4894  		return 0;
4895  
4896  	quot = (usec / interval) &
4897  	    (il_beacon_time_mask_high(il, il->hw_params.beacon_time_tsf_bits) >>
4898  	     il->hw_params.beacon_time_tsf_bits);
4899  	rem = (usec % interval) &
4900  	    il_beacon_time_mask_low(il, il->hw_params.beacon_time_tsf_bits);
4906  
4907  	return (quot << il->hw_params.beacon_time_tsf_bits) + rem;
4908  }
4909  EXPORT_SYMBOL(il_usecs_to_beacons);
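
/*
 * Worked example (illustrative: assume beacon_time_tsf_bits == 22 and
 * beacon_interval == 100 TU, i.e. interval = 102400 usec): for
 * usec = 250000 the quotient is 2 beacons and the remainder 45200 usec,
 * so the returned value is (2 << 22) + 45200.
 */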
4910  
4911  /* base is usually what we get from uCode with each received frame;
4912   * it is the same HW timer counter counting down
4913   */
4914  __le32
4915  il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
4916  		   u32 beacon_interval)
4917  {
4918  	u32 base_low = base &
4919  	    il_beacon_time_mask_low(il, il->hw_params.beacon_time_tsf_bits);
4920  	u32 addon_low = addon &
4921  	    il_beacon_time_mask_low(il, il->hw_params.beacon_time_tsf_bits);
4922  	u32 interval = beacon_interval * TIME_UNIT;
4923  	u32 res = (base &
4924  		   il_beacon_time_mask_high(il, il->hw_params.beacon_time_tsf_bits)) +
4925  	    (addon &
4926  	     il_beacon_time_mask_high(il, il->hw_params.beacon_time_tsf_bits));
4931  
4932  	if (base_low > addon_low)
4933  		res += base_low - addon_low;
4934  	else if (base_low < addon_low) {
4935  		res += interval + base_low - addon_low;
4936  		res += (1 << il->hw_params.beacon_time_tsf_bits);
4937  	} else
4938  		res += (1 << il->hw_params.beacon_time_tsf_bits);
4939  
4940  	return cpu_to_le32(res);
4941  }
4942  EXPORT_SYMBOL(il_add_beacon_time);
4943  
4944  #ifdef CONFIG_PM_SLEEP
4945  
4946  static int
4947  il_pci_suspend(struct device *device)
4948  {
4949  	struct il_priv *il = dev_get_drvdata(device);
4950  
4951  	/*
4952  	 * This function is called when the system goes into suspend state.
4953  	 * mac80211 will call il_mac_stop() from the mac80211 suspend function
4954  	 * first, but since il_mac_stop() has no knowledge of who the caller is,
4955  	 * it will not call apm_ops.stop() to stop the DMA operation.
4956  	 * Call apm_ops.stop() here to make sure we stop the DMA.
4957  	 */
4958  	il_apm_stop(il);
4959  
4960  	return 0;
4961  }
4962  
4963  static int
4964  il_pci_resume(struct device *device)
4965  {
4966  	struct pci_dev *pdev = to_pci_dev(device);
4967  	struct il_priv *il = pci_get_drvdata(pdev);
4968  	bool hw_rfkill = false;
4969  
4970  	/*
4971  	 * We disable the RETRY_TIMEOUT register (0x41) to keep
4972  	 * PCI Tx retries from interfering with C3 CPU state.
4973  	 */
4974  	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
4975  
4976  	_il_wr(il, CSR_INT, 0xffffffff);
4977  	_il_wr(il, CSR_FH_INT_STATUS, 0xffffffff);
4978  	il_enable_interrupts(il);
4979  
4980  	if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
4981  		hw_rfkill = true;
4982  
4983  	if (hw_rfkill)
4984  		set_bit(S_RFKILL, &il->status);
4985  	else
4986  		clear_bit(S_RFKILL, &il->status);
4987  
4988  	wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rfkill);
4989  
4990  	return 0;
4991  }
4992  
4993  SIMPLE_DEV_PM_OPS(il_pm_ops, il_pci_suspend, il_pci_resume);
4994  EXPORT_SYMBOL(il_pm_ops);
4995  
4996  #endif /* CONFIG_PM_SLEEP */
4997  
4998  static void
4999  il_update_qos(struct il_priv *il)
5000  {
5001  	if (test_bit(S_EXIT_PENDING, &il->status))
5002  		return;
5003  
5004  	il->qos_data.def_qos_parm.qos_flags = 0;
5005  
5006  	if (il->qos_data.qos_active)
5007  		il->qos_data.def_qos_parm.qos_flags |=
5008  		    QOS_PARAM_FLG_UPDATE_EDCA_MSK;
5009  
5010  	if (il->ht.enabled)
5011  		il->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
5012  
5013  	D_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n",
5014  	      il->qos_data.qos_active, il->qos_data.def_qos_parm.qos_flags);
5015  
5016  	il_send_cmd_pdu_async(il, C_QOS_PARAM, sizeof(struct il_qosparam_cmd),
5017  			      &il->qos_data.def_qos_parm, NULL);
5018  }
5019  
5020  /*
5021   * il_mac_config - mac80211 config callback
5022   */
5023  int
5024  il_mac_config(struct ieee80211_hw *hw, u32 changed)
5025  {
5026  	struct il_priv *il = hw->priv;
5027  	const struct il_channel_info *ch_info;
5028  	struct ieee80211_conf *conf = &hw->conf;
5029  	struct ieee80211_channel *channel = conf->chandef.chan;
5030  	struct il_ht_config *ht_conf = &il->current_ht_config;
5031  	unsigned long flags = 0;
5032  	int ret = 0;
5033  	u16 ch;
5034  	int scan_active = 0;
5035  	bool ht_changed = false;
5036  
5037  	mutex_lock(&il->mutex);
5038  	D_MAC80211("enter: channel %d changed 0x%X\n", channel->hw_value,
5039  		   changed);
5040  
5041  	if (unlikely(test_bit(S_SCANNING, &il->status))) {
5042  		scan_active = 1;
5043  		D_MAC80211("scan active\n");
5044  	}
5045  
5046  	if (changed &
5047  	    (IEEE80211_CONF_CHANGE_SMPS | IEEE80211_CONF_CHANGE_CHANNEL)) {
5048  		/* mac80211 uses static SM PS mode for non-HT, which is what we want */
5049  		il->current_ht_config.smps = conf->smps_mode;
5050  
5051  		/*
5052  		 * Recalculate chain counts.
5053  		 *
5054  		 * If monitor mode is enabled then mac80211 will
5055  		 * set up the SM PS mode to OFF if an HT channel is
5056  		 * configured.
5057  		 */
5058  		if (il->ops->set_rxon_chain)
5059  			il->ops->set_rxon_chain(il);
5060  	}
5061  
5062  	/* During scanning, mac80211 delays the channel setting until the
5063  	 * scan finishes, then calls us again with changed = 0.
5064  	 */
5065  	if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
5066  
5067  		if (scan_active)
5068  			goto set_ch_out;
5069  
5070  		ch = channel->hw_value;
5071  		ch_info = il_get_channel_info(il, channel->band, ch);
5072  		if (!il_is_channel_valid(ch_info)) {
5073  			D_MAC80211("leave - invalid channel\n");
5074  			ret = -EINVAL;
5075  			goto set_ch_out;
5076  		}
5077  
5078  		if (il->iw_mode == NL80211_IFTYPE_ADHOC &&
5079  		    !il_is_channel_ibss(ch_info)) {
5080  			D_MAC80211("leave - not IBSS channel\n");
5081  			ret = -EINVAL;
5082  			goto set_ch_out;
5083  		}
5084  
5085  		spin_lock_irqsave(&il->lock, flags);
5086  
5087  		/* Configure HT40 channels */
5088  		if (il->ht.enabled != conf_is_ht(conf)) {
5089  			il->ht.enabled = conf_is_ht(conf);
5090  			ht_changed = true;
5091  		}
5092  		if (il->ht.enabled) {
5093  			if (conf_is_ht40_minus(conf)) {
5094  				il->ht.extension_chan_offset =
5095  				    IEEE80211_HT_PARAM_CHA_SEC_BELOW;
5096  				il->ht.is_40mhz = true;
5097  			} else if (conf_is_ht40_plus(conf)) {
5098  				il->ht.extension_chan_offset =
5099  				    IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
5100  				il->ht.is_40mhz = true;
5101  			} else {
5102  				il->ht.extension_chan_offset =
5103  				    IEEE80211_HT_PARAM_CHA_SEC_NONE;
5104  				il->ht.is_40mhz = false;
5105  			}
5106  		} else
5107  			il->ht.is_40mhz = false;
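		/*
		 * Editorial summary of the mapping above: HT40- puts the
		 * secondary channel below the primary, HT40+ puts it above,
		 * and HT20 (or HT disabled) uses no secondary channel, so
		 * is_40mhz is set only for the two HT40 cases.
		 */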
5108  
5109  		/*
5110  		 * Default to no protection. Protection mode will
5111  		 * later be set from BSS config in il_ht_conf
5112  		 */
5113  		il->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
5114  
5115  		/* If we are switching from HT to 2.4 GHz, clear the flags
5116  		 * of any HT-related info, since 2.4 GHz does not
5117  		 * support HT. */
5118  		if (le16_to_cpu(il->staging.channel) != ch)
5119  			il->staging.flags = 0;
5120  
5121  		il_set_rxon_channel(il, channel);
5122  		il_set_rxon_ht(il, ht_conf);
5123  
5124  		il_set_flags_for_band(il, channel->band, il->vif);
5125  
5126  		spin_unlock_irqrestore(&il->lock, flags);
5127  
5128  		if (il->ops->update_bcast_stations)
5129  			ret = il->ops->update_bcast_stations(il);
5130  
5131  set_ch_out:
5132  		/* The list of supported rates and rate mask can be different
5133  		 * for each band; since the band may have changed, reset
5134  		 * the rate mask to what mac80211 lists */
5135  		il_set_rate(il);
5136  	}
5137  
5138  	if (changed & (IEEE80211_CONF_CHANGE_PS | IEEE80211_CONF_CHANGE_IDLE)) {
5139  		il->power_data.ps_disabled = !(conf->flags & IEEE80211_CONF_PS);
5140  		if (!il->power_data.ps_disabled)
5141  			IL_WARN_ONCE("Enabling power save might cause firmware crashes\n");
5142  		ret = il_power_update_mode(il, false);
5143  		if (ret)
5144  			D_MAC80211("Error setting sleep level\n");
5145  	}
5146  
5147  	if (changed & IEEE80211_CONF_CHANGE_POWER) {
5148  		D_MAC80211("TX Power old=%d new=%d\n", il->tx_power_user_lmt,
5149  			   conf->power_level);
5150  
5151  		il_set_tx_power(il, conf->power_level, false);
5152  	}
5153  
5154  	if (!il_is_ready(il)) {
5155  		D_MAC80211("leave - not ready\n");
5156  		goto out;
5157  	}
5158  
5159  	if (scan_active)
5160  		goto out;
5161  
5162  	if (memcmp(&il->active, &il->staging, sizeof(il->staging)))
5163  		il_commit_rxon(il);
5164  	else
5165  		D_INFO("Not re-sending same RXON configuration.\n");
5166  	if (ht_changed)
5167  		il_update_qos(il);
5168  
5169  out:
5170  	D_MAC80211("leave ret %d\n", ret);
5171  	mutex_unlock(&il->mutex);
5172  
5173  	return ret;
5174  }
5175  EXPORT_SYMBOL(il_mac_config);
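/*
 * Editorial note on the RXON handling above: il->staging accumulates
 * configuration changes under il->lock, il_commit_rxon() pushes the staging
 * copy to the device, and il->active then mirrors what the hardware
 * accepted.  The memcmp() guard avoids re-sending an identical RXON.
 */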
5176  
5177  void
5178  il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
5179  {
5180  	struct il_priv *il = hw->priv;
5181  	unsigned long flags;
5182  
5183  	mutex_lock(&il->mutex);
5184  	D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);
5185  
5186  	spin_lock_irqsave(&il->lock, flags);
5187  
5188  	memset(&il->current_ht_config, 0, sizeof(struct il_ht_config));
5189  
5190  	/* New association: get rid of the old IBSS beacon skb. */
5191  	dev_consume_skb_irq(il->beacon_skb);
5192  	il->beacon_skb = NULL;
5193  	il->timestamp = 0;
5194  
5195  	spin_unlock_irqrestore(&il->lock, flags);
5196  
5197  	il_scan_cancel_timeout(il, 100);
5198  	if (!il_is_ready_rf(il)) {
5199  		D_MAC80211("leave - not ready\n");
5200  		mutex_unlock(&il->mutex);
5201  		return;
5202  	}
5203  
5204  	/* we are restarting association process */
5205  	il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
5206  	il_commit_rxon(il);
5207  
5208  	il_set_rate(il);
5209  
5210  	D_MAC80211("leave\n");
5211  	mutex_unlock(&il->mutex);
5212  }
5213  EXPORT_SYMBOL(il_mac_reset_tsf);
5214  
5215  static void
5216  il_ht_conf(struct il_priv *il, struct ieee80211_vif *vif)
5217  {
5218  	struct il_ht_config *ht_conf = &il->current_ht_config;
5219  	struct ieee80211_sta *sta;
5220  	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
5221  
5222  	D_ASSOC("enter:\n");
5223  
5224  	if (!il->ht.enabled)
5225  		return;
5226  
5227  	il->ht.protection =
5228  	    bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
5229  	il->ht.non_gf_sta_present =
5230  	    !!(bss_conf->ht_operation_mode &
5231  	       IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
5232  
5233  	ht_conf->single_chain_sufficient = false;
5234  
5235  	switch (vif->type) {
5236  	case NL80211_IFTYPE_STATION:
5237  		rcu_read_lock();
5238  		sta = ieee80211_find_sta(vif, bss_conf->bssid);
5239  		if (sta) {
5240  			struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
5241  			int maxstreams;
5242  
5243  			maxstreams =
5244  			    (ht_cap->mcs.tx_params &
5245  			     IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
5246  			    >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
5247  			maxstreams += 1;
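			/*
			 * Editorial note: the max-streams field occupies
			 * bits 2-3 of tx_params and encodes the stream
			 * count minus one, hence the +1 above.
			 */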
5248  
5249  			if (ht_cap->mcs.rx_mask[1] == 0 &&
5250  			    ht_cap->mcs.rx_mask[2] == 0)
5251  				ht_conf->single_chain_sufficient = true;
5252  			if (maxstreams <= 1)
5253  				ht_conf->single_chain_sufficient = true;
5254  		} else {
5255  			/*
5256  			 * This can only happen through a race, when the
5257  			 * AP disconnects us while we're still setting up
5258  			 * the connection; in that case mac80211 will soon
5259  			 * tell us about it.
5260  			 */
5261  			ht_conf->single_chain_sufficient = true;
5262  		}
5263  		rcu_read_unlock();
5264  		break;
5265  	case NL80211_IFTYPE_ADHOC:
5266  		ht_conf->single_chain_sufficient = true;
5267  		break;
5268  	default:
5269  		break;
5270  	}
5271  
5272  	D_ASSOC("leave\n");
5273  }
5274  
5275  static inline void
5276  il_set_no_assoc(struct il_priv *il, struct ieee80211_vif *vif)
5277  {
5278  	/*
5279  	 * inform the ucode that there is no longer an
5280  	 * association and that no more packets should be
5281  	 * sent
5282  	 */
5283  	il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
5284  	il->staging.assoc_id = 0;
5285  	il_commit_rxon(il);
5286  }
5287  
5288  static void
5289  il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
5290  {
5291  	struct il_priv *il = hw->priv;
5292  	unsigned long flags;
5293  	__le64 timestamp;
5294  	struct sk_buff *skb = ieee80211_beacon_get(hw, vif, 0);
5295  
5296  	if (!skb)
5297  		return;
5298  
5299  	D_MAC80211("enter\n");
5300  
5301  	lockdep_assert_held(&il->mutex);
5302  
5303  	if (!il->beacon_enabled) {
5304  		IL_ERR("update beacon with no beaconing enabled\n");
5305  		dev_kfree_skb(skb);
5306  		return;
5307  	}
5308  
5309  	spin_lock_irqsave(&il->lock, flags);
5310  	dev_consume_skb_irq(il->beacon_skb);
5311  	il->beacon_skb = skb;
5312  
5313  	timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
5314  	il->timestamp = le64_to_cpu(timestamp);
5315  
5316  	D_MAC80211("leave\n");
5317  	spin_unlock_irqrestore(&il->lock, flags);
5318  
5319  	if (!il_is_ready_rf(il)) {
5320  		D_MAC80211("leave - RF not ready\n");
5321  		return;
5322  	}
5323  
5324  	il->ops->post_associate(il);
5325  }
5326  
5327  void
5328  il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5329  			struct ieee80211_bss_conf *bss_conf, u64 changes)
5330  {
5331  	struct il_priv *il = hw->priv;
5332  	int ret;
5333  
5334  	mutex_lock(&il->mutex);
5335  	D_MAC80211("enter: changes 0x%llx\n", changes);
5336  
5337  	if (!il_is_alive(il)) {
5338  		D_MAC80211("leave - not alive\n");
5339  		mutex_unlock(&il->mutex);
5340  		return;
5341  	}
5342  
5343  	if (changes & BSS_CHANGED_QOS) {
5344  		unsigned long flags;
5345  
5346  		spin_lock_irqsave(&il->lock, flags);
5347  		il->qos_data.qos_active = bss_conf->qos;
5348  		il_update_qos(il);
5349  		spin_unlock_irqrestore(&il->lock, flags);
5350  	}
5351  
5352  	if (changes & BSS_CHANGED_BEACON_ENABLED) {
5353  		/* FIXME: can we remove beacon_enabled ? */
5354  		if (vif->bss_conf.enable_beacon)
5355  			il->beacon_enabled = true;
5356  		else
5357  			il->beacon_enabled = false;
5358  	}
5359  
5360  	if (changes & BSS_CHANGED_BSSID) {
5361  		D_MAC80211("BSSID %pM\n", bss_conf->bssid);
5362  
5363  		/*
5364  		 * On a passive channel we wait with blocked queues to see
5365  		 * if there is traffic on that channel.  If no frame is
5366  		 * received (very unlikely, since the scan detects the AP on
5367  		 * that channel, but theoretically possible), the mac80211
5368  		 * association procedure will time out and mac80211 will
5369  		 * call us with a zero BSSID; we must unblock the queues then.
5370  		 */
5371  		if (is_zero_ether_addr(bss_conf->bssid))
5372  			il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
5373  
5374  		/*
5375  		 * If there is currently an HW scan going on in the background,
5376  		 * we need to cancel it; otherwise we are sometimes not
5377  		 * able to authenticate (FIXME: why?).
5378  		 */
5379  		if (il_scan_cancel_timeout(il, 100)) {
5380  			D_MAC80211("leave - scan abort failed\n");
5381  			mutex_unlock(&il->mutex);
5382  			return;
5383  		}
5384  
5385  		/* mac80211 only sets assoc when in STATION mode */
5386  		memcpy(il->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);
5387  
5388  		/* FIXME: currently needed in a few places */
5389  		memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
5390  	}
5391  
5392  	/*
5393  	 * This needs to be after setting the BSSID in case
5394  	 * mac80211 decides to do both changes at once because
5395  	 * it will invoke post_associate.
5396  	 */
5397  	if (vif->type == NL80211_IFTYPE_ADHOC && (changes & BSS_CHANGED_BEACON))
5398  		il_beacon_update(hw, vif);
5399  
5400  	if (changes & BSS_CHANGED_ERP_PREAMBLE) {
5401  		D_MAC80211("ERP_PREAMBLE %d\n", bss_conf->use_short_preamble);
5402  		if (bss_conf->use_short_preamble)
5403  			il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
5404  		else
5405  			il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
5406  	}
5407  
5408  	if (changes & BSS_CHANGED_ERP_CTS_PROT) {
5409  		D_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
5410  		if (bss_conf->use_cts_prot && il->band != NL80211_BAND_5GHZ)
5411  			il->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
5412  		else
5413  			il->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
5414  		if (bss_conf->use_cts_prot)
5415  			il->staging.flags |= RXON_FLG_SELF_CTS_EN;
5416  		else
5417  			il->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
5418  	}
5419  
5420  	if (changes & BSS_CHANGED_BASIC_RATES) {
5421  		/* XXX use this information
5422  		 *
5423  		 * To do that, remove code from il_set_rate() and put something
5424  		 * like this here:
5425  		 *
5426  		 *	if (A-band)
5427  		 *		il->staging.ofdm_basic_rates =
5428  		 *			bss_conf->basic_rates;
5429  		 *	else
5430  		 *		il->staging.ofdm_basic_rates =
5431  		 *			bss_conf->basic_rates >> 4;
5432  		 *	il->staging.cck_basic_rates =
5433  		 *		bss_conf->basic_rates & 0xF;
5434  		 */
5435  	}
5436  
5437  	if (changes & BSS_CHANGED_HT) {
5438  		il_ht_conf(il, vif);
5439  
5440  		if (il->ops->set_rxon_chain)
5441  			il->ops->set_rxon_chain(il);
5442  	}
5443  
5444  	if (changes & BSS_CHANGED_ASSOC) {
5445  		D_MAC80211("ASSOC %d\n", vif->cfg.assoc);
5446  		if (vif->cfg.assoc) {
5447  			il->timestamp = bss_conf->sync_tsf;
5448  
5449  			if (!il_is_rfkill(il))
5450  				il->ops->post_associate(il);
5451  		} else
5452  			il_set_no_assoc(il, vif);
5453  	}
5454  
5455  	if (changes && il_is_associated(il) && vif->cfg.aid) {
5456  		D_MAC80211("Changes (%#llx) while associated\n", changes);
5457  		ret = il_send_rxon_assoc(il);
5458  		if (!ret) {
5459  			/* Sync active_rxon with latest change. */
5460  			memcpy((void *)&il->active, &il->staging,
5461  			       sizeof(struct il_rxon_cmd));
5462  		}
5463  	}
5464  
5465  	if (changes & BSS_CHANGED_BEACON_ENABLED) {
5466  		if (vif->bss_conf.enable_beacon) {
5467  			memcpy(il->staging.bssid_addr, bss_conf->bssid,
5468  			       ETH_ALEN);
5469  			memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
5470  			il->ops->config_ap(il);
5471  		} else
5472  			il_set_no_assoc(il, vif);
5473  	}
5474  
5475  	if (changes & BSS_CHANGED_IBSS) {
5476  		ret = il->ops->manage_ibss_station(il, vif,
5477  						   vif->cfg.ibss_joined);
5478  		if (ret)
5479  			IL_ERR("failed to %s IBSS station %pM\n",
5480  			       vif->cfg.ibss_joined ? "add" : "remove",
5481  			       bss_conf->bssid);
5482  	}
5483  
5484  	D_MAC80211("leave\n");
5485  	mutex_unlock(&il->mutex);
5486  }
5487  EXPORT_SYMBOL(il_mac_bss_info_changed);
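/*
 * A minimal sketch (editorial; the ops-table name is hypothetical) of how
 * the callbacks exported from this file are plugged into a chipset driver's
 * ieee80211_ops:
 *
 *	static const struct ieee80211_ops il_example_ops = {
 *		.config		  = il_mac_config,
 *		.reset_tsf	  = il_mac_reset_tsf,
 *		.bss_info_changed = il_mac_bss_info_changed,
 *	};
 */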
5488  
5489  irqreturn_t
5490  il_isr(int irq, void *data)
5491  {
5492  	struct il_priv *il = data;
5493  	u32 inta, inta_mask;
5494  	u32 inta_fh;
5495  	unsigned long flags;
5496  	if (!il)
5497  		return IRQ_NONE;
5498  
5499  	spin_lock_irqsave(&il->lock, flags);
5500  
5501  	/* Disable (but don't clear!) interrupts here to avoid
5502  	 *    back-to-back ISRs and sporadic interrupts from our NIC.
5503  	 * If we have something to service, the tasklet will re-enable ints.
5504  	 * If we *don't* have something, we'll re-enable before leaving here. */
5505  	inta_mask = _il_rd(il, CSR_INT_MASK);	/* just for debug */
5506  	_il_wr(il, CSR_INT_MASK, 0x00000000);
5507  
5508  	/* Discover which interrupts are active/pending */
5509  	inta = _il_rd(il, CSR_INT);
5510  	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
5511  
5512  	/* Ignore interrupt if there's nothing in NIC to service.
5513  	 * This may be due to IRQ shared with another device,
5514  	 * or due to sporadic interrupts thrown from our NIC. */
5515  	if (!inta && !inta_fh) {
5516  		D_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
5517  		goto none;
5518  	}
5519  
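	/* Editorial note: reads from a surprise-removed PCI device usually
	 * return all 1s; the 0xa5a5a5a5 pattern is another "dead device"
	 * value, which is why both are treated as "hardware gone" below. */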
5520  	if (inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0) {
5521  		/* Hardware disappeared. It might have already raised
5522  		 * an interrupt */
5523  		IL_WARN("HARDWARE GONE?? INTA == 0x%08x\n", inta);
5524  		goto unplugged;
5525  	}
5526  
5527  	D_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta, inta_mask,
5528  	      inta_fh);
5529  
5530  	inta &= ~CSR_INT_BIT_SCD;
5531  
5532  	/* il_irq_tasklet() will service interrupts and re-enable them */
5533  	if (likely(inta || inta_fh))
5534  		tasklet_schedule(&il->irq_tasklet);
5535  
5536  unplugged:
5537  	spin_unlock_irqrestore(&il->lock, flags);
5538  	return IRQ_HANDLED;
5539  
5540  none:
5541  	/* Re-enable interrupts here since we don't have anything to service,
5542  	 * but only if they were disabled by this irq handler. */
5543  	if (test_bit(S_INT_ENABLED, &il->status))
5544  		il_enable_interrupts(il);
5545  	spin_unlock_irqrestore(&il->lock, flags);
5546  	return IRQ_NONE;
5547  }
5548  EXPORT_SYMBOL(il_isr);
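/*
 * Sketch of the expected registration (editorial; mirrors the chipset
 * drivers): il_isr is installed as a shared PCI interrupt handler and only
 * schedules il->irq_tasklet for the real work, e.g.:
 *
 *	err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED,
 *			  DRV_NAME, il);
 */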
5549  
5550  /*
5551   * il_tx_cmd_protection: Set RTS/CTS flags.  This is the only function
5552   * shared by 3945 and 4965.
5553   */
5554  void
5555  il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
5556  		     __le16 fc, __le32 *tx_flags)
5557  {
5558  	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
5559  		*tx_flags |= TX_CMD_FLG_RTS_MSK;
5560  		*tx_flags &= ~TX_CMD_FLG_CTS_MSK;
5561  		*tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
5562  
5563  		if (!ieee80211_is_mgmt(fc))
5564  			return;
5565  
5566  		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
5567  		case cpu_to_le16(IEEE80211_STYPE_AUTH):
5568  		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
5569  		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
5570  		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
5571  			*tx_flags &= ~TX_CMD_FLG_RTS_MSK;
5572  			*tx_flags |= TX_CMD_FLG_CTS_MSK;
5573  			break;
5574  		}
5575  	} else if (info->control.rates[0].flags &
5576  		   IEEE80211_TX_RC_USE_CTS_PROTECT) {
5577  		*tx_flags &= ~TX_CMD_FLG_RTS_MSK;
5578  		*tx_flags |= TX_CMD_FLG_CTS_MSK;
5579  		*tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
5580  	}
5581  }
5582  EXPORT_SYMBOL(il_tx_cmd_protection);
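/*
 * Sketch of a typical call site in the chipset TX paths (editorial;
 * variable names illustrative): the caller loads the TX command flags,
 * lets this helper adjust the protection bits, then writes them back:
 *
 *	__le32 tx_flags = tx_cmd->tx_flags;
 *
 *	il_tx_cmd_protection(il, info, fc, &tx_flags);
 *	tx_cmd->tx_flags = tx_flags;
 */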
5583