0124-mac80211-fix-packet-loss-on-fq-reordering.patch 20 KB

  1. From: Matthias Schiffer <mschiffer@universe-factory.net>
  2. Date: Fri, 30 Sep 2016 16:57:44 +0200
  3. Subject: mac80211: fix packet loss on fq reordering
  4. Signed-off-by: Felix Fietkau <nbd@nbd.name>
  5. Backport of LEDE a194ffd4a89588bc75aeb9a27f59c36afd3d24bd
  6. diff --git a/package/kernel/mac80211/patches/346-mac80211-Move-reorder-sensitive-TX-handlers-to-after.patch b/package/kernel/mac80211/patches/346-mac80211-Move-reorder-sensitive-TX-handlers-to-after.patch
  7. new file mode 100644
  8. index 0000000..8ceed51
  9. --- /dev/null
  10. +++ b/package/kernel/mac80211/patches/346-mac80211-Move-reorder-sensitive-TX-handlers-to-after.patch
  11. @@ -0,0 +1,478 @@
  12. +From: Felix Fietkau <nbd@nbd.name>
  13. +Date: Sun, 4 Sep 2016 17:46:24 +0200
  14. +Subject: [PATCH] mac80211: Move reorder-sensitive TX handlers to after
  15. + TXQ dequeue
  16. +
  17. +When using intermediate queues, sequence number allocation is deferred
  18. +until dequeue. This doesn't work for PS response frames, which bypass
  19. +those queues.
  20. +
  21. +Signed-off-by: Felix Fietkau <nbd@nbd.name>
  22. +---
  23. +
  24. +--- a/net/mac80211/tx.c
  25. ++++ b/net/mac80211/tx.c
  26. +@@ -38,6 +38,12 @@
  27. + #include "wme.h"
  28. + #include "rate.h"
  29. +
  30. ++static int invoke_tx_handlers_late(struct ieee80211_tx_data *tx);
  31. ++static bool ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata,
  32. ++ struct sta_info *sta, u8 pn_offs,
  33. ++ struct ieee80211_key_conf *key_conf,
  34. ++ struct sk_buff *skb);
  35. ++
  36. + /* misc utils */
  37. +
  38. + static inline void ieee80211_tx_stats(struct net_device *dev, u32 len)
  39. +@@ -849,8 +855,7 @@ ieee80211_tx_h_sequence(struct ieee80211
  40. + tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
  41. + tx->sta->tx_stats.msdu[tid]++;
  42. +
  43. +- if (!tx->sta->sta.txq[0])
  44. +- hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid);
  45. ++ hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid);
  46. +
  47. + return TX_CONTINUE;
  48. + }
  49. +@@ -1398,6 +1403,7 @@ void ieee80211_txq_init(struct ieee80211
  50. + fq_tin_init(&txqi->tin);
  51. + fq_flow_init(&txqi->def_flow);
  52. + codel_vars_init(&txqi->def_cvars);
  53. ++ __skb_queue_head_init(&txqi->frags);
  54. +
  55. + txqi->txq.vif = &sdata->vif;
  56. +
  57. +@@ -1420,6 +1426,7 @@ void ieee80211_txq_purge(struct ieee8021
  58. + struct fq_tin *tin = &txqi->tin;
  59. +
  60. + fq_tin_reset(fq, tin, fq_skb_free_func);
  61. ++ ieee80211_purge_tx_queue(&local->hw, &txqi->frags);
  62. + }
  63. +
  64. + int ieee80211_txq_setup_flows(struct ieee80211_local *local)
  65. +@@ -1476,12 +1483,19 @@ struct sk_buff *ieee80211_tx_dequeue(str
  66. + struct sk_buff *skb = NULL;
  67. + struct fq *fq = &local->fq;
  68. + struct fq_tin *tin = &txqi->tin;
  69. ++ struct ieee80211_tx_info *info;
  70. +
  71. + spin_lock_bh(&fq->lock);
  72. +
  73. + if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags))
  74. + goto out;
  75. +
  76. ++ /* Make sure fragments stay together. */
  77. ++ skb = __skb_dequeue(&txqi->frags);
  78. ++ if (skb)
  79. ++ goto out;
  80. ++
  81. ++begin:
  82. + skb = fq_tin_dequeue(fq, tin, fq_tin_dequeue_func);
  83. + if (!skb)
  84. + goto out;
  85. +@@ -1489,16 +1503,38 @@ struct sk_buff *ieee80211_tx_dequeue(str
  86. + ieee80211_set_skb_vif(skb, txqi);
  87. +
  88. + hdr = (struct ieee80211_hdr *)skb->data;
  89. +- if (txq->sta && ieee80211_is_data_qos(hdr->frame_control)) {
  90. ++ info = IEEE80211_SKB_CB(skb);
  91. ++ if (txq->sta && info->control.flags & IEEE80211_TX_CTRL_FAST_XMIT) {
  92. + struct sta_info *sta = container_of(txq->sta, struct sta_info,
  93. + sta);
  94. +- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
  95. ++ u8 pn_offs = 0;
  96. +
  97. +- hdr->seq_ctrl = ieee80211_tx_next_seq(sta, txq->tid);
  98. +- if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags))
  99. +- info->flags |= IEEE80211_TX_CTL_AMPDU;
  100. +- else
  101. +- info->flags &= ~IEEE80211_TX_CTL_AMPDU;
  102. ++ if (info->control.hw_key)
  103. ++ pn_offs = ieee80211_padded_hdrlen(hw, hdr->frame_control);
  104. ++
  105. ++ ieee80211_xmit_fast_finish(sta->sdata, sta, pn_offs,
  106. ++ info->control.hw_key, skb);
  107. ++ } else {
  108. ++ struct ieee80211_tx_data tx = { };
  109. ++
  110. ++ __skb_queue_head_init(&tx.skbs);
  111. ++ tx.local = local;
  112. ++ tx.skb = skb;
  113. ++ tx.hdrlen = ieee80211_padded_hdrlen(hw, hdr->frame_control);
  114. ++ if (txq->sta) {
  115. ++ tx.sta = container_of(txq->sta, struct sta_info, sta);
  116. ++ tx.sdata = tx.sta->sdata;
  117. ++ } else {
  118. ++ tx.sdata = vif_to_sdata(info->control.vif);
  119. ++ }
  120. ++
  121. ++ if (invoke_tx_handlers_late(&tx))
  122. ++ goto begin;
  123. ++
  124. ++ skb = __skb_dequeue(&tx.skbs);
  125. ++
  126. ++ if (!skb_queue_empty(&tx.skbs))
  127. ++ skb_queue_splice_tail(&tx.skbs, &txqi->frags);
  128. + }
  129. +
  130. + out:
  131. +@@ -1512,6 +1548,47 @@ out:
  132. + }
  133. + EXPORT_SYMBOL(ieee80211_tx_dequeue);
  134. +
  135. ++static bool ieee80211_queue_skb(struct ieee80211_local *local,
  136. ++ struct ieee80211_sub_if_data *sdata,
  137. ++ struct sta_info *sta,
  138. ++ struct sk_buff *skb)
  139. ++{
  140. ++ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
  141. ++ struct fq *fq = &local->fq;
  142. ++ struct ieee80211_vif *vif;
  143. ++ struct txq_info *txqi;
  144. ++ struct ieee80211_sta *pubsta;
  145. ++
  146. ++ if (!local->ops->wake_tx_queue ||
  147. ++ sdata->vif.type == NL80211_IFTYPE_MONITOR)
  148. ++ return false;
  149. ++
  150. ++ if (sta && sta->uploaded)
  151. ++ pubsta = &sta->sta;
  152. ++ else
  153. ++ pubsta = NULL;
  154. ++
  155. ++ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
  156. ++ sdata = container_of(sdata->bss,
  157. ++ struct ieee80211_sub_if_data, u.ap);
  158. ++
  159. ++ vif = &sdata->vif;
  160. ++ txqi = ieee80211_get_txq(local, vif, pubsta, skb);
  161. ++
  162. ++ if (!txqi)
  163. ++ return false;
  164. ++
  165. ++ info->control.vif = vif;
  166. ++
  167. ++ spin_lock_bh(&fq->lock);
  168. ++ ieee80211_txq_enqueue(local, txqi, skb);
  169. ++ spin_unlock_bh(&fq->lock);
  170. ++
  171. ++ drv_wake_tx_queue(local, txqi);
  172. ++
  173. ++ return true;
  174. ++}
  175. ++
  176. + static bool ieee80211_tx_frags(struct ieee80211_local *local,
  177. + struct ieee80211_vif *vif,
  178. + struct ieee80211_sta *sta,
  179. +@@ -1519,9 +1596,7 @@ static bool ieee80211_tx_frags(struct ie
  180. + bool txpending)
  181. + {
  182. + struct ieee80211_tx_control control = {};
  183. +- struct fq *fq = &local->fq;
  184. + struct sk_buff *skb, *tmp;
  185. +- struct txq_info *txqi;
  186. + unsigned long flags;
  187. +
  188. + skb_queue_walk_safe(skbs, skb, tmp) {
  189. +@@ -1536,21 +1611,6 @@ static bool ieee80211_tx_frags(struct ie
  190. + }
  191. + #endif
  192. +
  193. +- txqi = ieee80211_get_txq(local, vif, sta, skb);
  194. +- if (txqi) {
  195. +- info->control.vif = vif;
  196. +-
  197. +- __skb_unlink(skb, skbs);
  198. +-
  199. +- spin_lock_bh(&fq->lock);
  200. +- ieee80211_txq_enqueue(local, txqi, skb);
  201. +- spin_unlock_bh(&fq->lock);
  202. +-
  203. +- drv_wake_tx_queue(local, txqi);
  204. +-
  205. +- continue;
  206. +- }
  207. +-
  208. + spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
  209. + if (local->queue_stop_reasons[q] ||
  210. + (!txpending && !skb_queue_empty(&local->pending[q]))) {
  211. +@@ -1671,10 +1731,13 @@ static bool __ieee80211_tx(struct ieee80
  212. + /*
  213. + * Invoke TX handlers, return 0 on success and non-zero if the
  214. + * frame was dropped or queued.
  215. ++ *
  216. ++ * The handlers are split into an early and late part. The latter is everything
  217. ++ * that can be sensitive to reordering, and will be deferred to after packets
  218. ++ * are dequeued from the intermediate queues (when they are enabled).
  219. + */
  220. +-static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
  221. ++static int invoke_tx_handlers_early(struct ieee80211_tx_data *tx)
  222. + {
  223. +- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
  224. + ieee80211_tx_result res = TX_DROP;
  225. +
  226. + #define CALL_TXH(txh) \
  227. +@@ -1688,16 +1751,42 @@ static int invoke_tx_handlers(struct iee
  228. + CALL_TXH(ieee80211_tx_h_check_assoc);
  229. + CALL_TXH(ieee80211_tx_h_ps_buf);
  230. + CALL_TXH(ieee80211_tx_h_check_control_port_protocol);
  231. +- CALL_TXH(ieee80211_tx_h_select_key);
  232. ++
  233. + if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
  234. + CALL_TXH(ieee80211_tx_h_rate_ctrl);
  235. +
  236. ++ txh_done:
  237. ++ if (unlikely(res == TX_DROP)) {
  238. ++ I802_DEBUG_INC(tx->local->tx_handlers_drop);
  239. ++ if (tx->skb)
  240. ++ ieee80211_free_txskb(&tx->local->hw, tx->skb);
  241. ++ else
  242. ++ ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs);
  243. ++ return -1;
  244. ++ } else if (unlikely(res == TX_QUEUED)) {
  245. ++ I802_DEBUG_INC(tx->local->tx_handlers_queued);
  246. ++ return -1;
  247. ++ }
  248. ++
  249. ++ return 0;
  250. ++}
  251. ++
  252. ++/*
  253. ++ * Late handlers can be called while the sta lock is held. Handlers that can
  254. ++ * cause packets to be generated will cause deadlock!
  255. ++ */
  256. ++static int invoke_tx_handlers_late(struct ieee80211_tx_data *tx)
  257. ++{
  258. ++ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
  259. ++ ieee80211_tx_result res = TX_CONTINUE;
  260. ++
  261. + if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION)) {
  262. + __skb_queue_tail(&tx->skbs, tx->skb);
  263. + tx->skb = NULL;
  264. + goto txh_done;
  265. + }
  266. +
  267. ++ CALL_TXH(ieee80211_tx_h_select_key);
  268. + CALL_TXH(ieee80211_tx_h_michael_mic_add);
  269. + CALL_TXH(ieee80211_tx_h_sequence);
  270. + CALL_TXH(ieee80211_tx_h_fragment);
  271. +@@ -1724,6 +1813,15 @@ static int invoke_tx_handlers(struct iee
  272. + return 0;
  273. + }
  274. +
  275. ++static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
  276. ++{
  277. ++ int r = invoke_tx_handlers_early(tx);
  278. ++ if (r)
  279. ++ return r;
  280. ++
  281. ++ return invoke_tx_handlers_late(tx);
  282. ++}
  283. ++
  284. + bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
  285. + struct ieee80211_vif *vif, struct sk_buff *skb,
  286. + int band, struct ieee80211_sta **sta)
  287. +@@ -1798,7 +1896,13 @@ static bool ieee80211_tx(struct ieee8021
  288. + info->hw_queue =
  289. + sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
  290. +
  291. +- if (!invoke_tx_handlers(&tx))
  292. ++ if (invoke_tx_handlers_early(&tx))
  293. ++ return false;
  294. ++
  295. ++ if (ieee80211_queue_skb(local, sdata, tx.sta, tx.skb))
  296. ++ return true;
  297. ++
  298. ++ if (!invoke_tx_handlers_late(&tx))
  299. + result = __ieee80211_tx(local, &tx.skbs, led_len,
  300. + tx.sta, txpending);
  301. +
  302. +@@ -3181,7 +3285,7 @@ out:
  303. + }
  304. +
  305. + static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
  306. +- struct net_device *dev, struct sta_info *sta,
  307. ++ struct sta_info *sta,
  308. + struct ieee80211_fast_tx *fast_tx,
  309. + struct sk_buff *skb)
  310. + {
  311. +@@ -3192,9 +3296,9 @@ static bool ieee80211_xmit_fast(struct i
  312. + struct ethhdr eth;
  313. + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
  314. + struct ieee80211_hdr *hdr = (void *)fast_tx->hdr;
  315. +- struct ieee80211_tx_data tx;
  316. +- ieee80211_tx_result r;
  317. + struct tid_ampdu_tx *tid_tx = NULL;
  318. ++ ieee80211_tx_result r;
  319. ++ struct ieee80211_tx_data tx;
  320. + u8 tid = IEEE80211_NUM_TIDS;
  321. +
  322. + /* control port protocol needs a lot of special handling */
  323. +@@ -3232,8 +3336,6 @@ static bool ieee80211_xmit_fast(struct i
  324. + return true;
  325. + }
  326. +
  327. +- ieee80211_tx_stats(dev, skb->len + extra_head);
  328. +-
  329. + if ((hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) &&
  330. + ieee80211_amsdu_aggregate(sdata, sta, fast_tx, skb))
  331. + return true;
  332. +@@ -3262,24 +3364,7 @@ static bool ieee80211_xmit_fast(struct i
  333. + info->flags = IEEE80211_TX_CTL_FIRST_FRAGMENT |
  334. + IEEE80211_TX_CTL_DONTFRAG |
  335. + (tid_tx ? IEEE80211_TX_CTL_AMPDU : 0);
  336. +-
  337. +- if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
  338. +- *ieee80211_get_qos_ctl(hdr) = tid;
  339. +- if (!sta->sta.txq[0])
  340. +- hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
  341. +- } else {
  342. +- info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
  343. +- hdr->seq_ctrl = cpu_to_le16(sdata->sequence_number);
  344. +- sdata->sequence_number += 0x10;
  345. +- }
  346. +-
  347. +- if (skb_shinfo(skb)->gso_size)
  348. +- sta->tx_stats.msdu[tid] +=
  349. +- DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size);
  350. +- else
  351. +- sta->tx_stats.msdu[tid]++;
  352. +-
  353. +- info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
  354. ++ info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT;
  355. +
  356. + __skb_queue_head_init(&tx.skbs);
  357. +
  358. +@@ -3305,22 +3390,71 @@ static bool ieee80211_xmit_fast(struct i
  359. + }
  360. + }
  361. +
  362. ++ if (ieee80211_queue_skb(local, sdata, sta, skb))
  363. ++ return true;
  364. ++
  365. ++ ieee80211_xmit_fast_finish(sdata, sta, fast_tx->pn_offs,
  366. ++ &fast_tx->key->conf, skb);
  367. ++
  368. ++ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
  369. ++ sdata = container_of(sdata->bss,
  370. ++ struct ieee80211_sub_if_data, u.ap);
  371. ++
  372. ++ __skb_queue_tail(&tx.skbs, skb);
  373. ++ ieee80211_tx_frags(local, &sdata->vif, &sta->sta, &tx.skbs, false);
  374. ++
  375. ++ return true;
  376. ++}
  377. ++
  378. ++/*
  379. ++ * Can be called while the sta lock is held. Anything that can cause packets to
  380. ++ * be generated will cause deadlock!
  381. ++ */
  382. ++static bool ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata,
  383. ++ struct sta_info *sta, u8 pn_offs,
  384. ++ struct ieee80211_key_conf *key_conf,
  385. ++ struct sk_buff *skb)
  386. ++{
  387. ++ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
  388. ++ struct ieee80211_hdr *hdr = (void *)skb->data;
  389. ++ u8 tid = IEEE80211_NUM_TIDS;
  390. ++
  391. ++ ieee80211_tx_stats(skb->dev, skb->len);
  392. ++
  393. ++ if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
  394. ++ tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
  395. ++ *ieee80211_get_qos_ctl(hdr) = tid;
  396. ++ hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
  397. ++ } else {
  398. ++ info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
  399. ++ hdr->seq_ctrl = cpu_to_le16(sdata->sequence_number);
  400. ++ sdata->sequence_number += 0x10;
  401. ++ }
  402. ++
  403. ++ if (skb_shinfo(skb)->gso_size)
  404. ++ sta->tx_stats.msdu[tid] +=
  405. ++ DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size);
  406. ++ else
  407. ++ sta->tx_stats.msdu[tid]++;
  408. ++
  409. ++ info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
  410. ++
  411. + /* statistics normally done by ieee80211_tx_h_stats (but that
  412. + * has to consider fragmentation, so is more complex)
  413. + */
  414. + sta->tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len;
  415. + sta->tx_stats.packets[skb_get_queue_mapping(skb)]++;
  416. +
  417. +- if (fast_tx->pn_offs) {
  418. ++ if (pn_offs) {
  419. + u64 pn;
  420. +- u8 *crypto_hdr = skb->data + fast_tx->pn_offs;
  421. ++ u8 *crypto_hdr = skb->data + pn_offs;
  422. +
  423. +- switch (fast_tx->key->conf.cipher) {
  424. ++ switch (key_conf->cipher) {
  425. + case WLAN_CIPHER_SUITE_CCMP:
  426. + case WLAN_CIPHER_SUITE_CCMP_256:
  427. + case WLAN_CIPHER_SUITE_GCMP:
  428. + case WLAN_CIPHER_SUITE_GCMP_256:
  429. +- pn = atomic64_inc_return(&fast_tx->key->conf.tx_pn);
  430. ++ pn = atomic64_inc_return(&key_conf->tx_pn);
  431. + crypto_hdr[0] = pn;
  432. + crypto_hdr[1] = pn >> 8;
  433. + crypto_hdr[4] = pn >> 16;
  434. +@@ -3331,12 +3465,6 @@ static bool ieee80211_xmit_fast(struct i
  435. + }
  436. + }
  437. +
  438. +- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
  439. +- sdata = container_of(sdata->bss,
  440. +- struct ieee80211_sub_if_data, u.ap);
  441. +-
  442. +- __skb_queue_tail(&tx.skbs, skb);
  443. +- ieee80211_tx_frags(local, &sdata->vif, &sta->sta, &tx.skbs, false);
  444. + return true;
  445. + }
  446. +
  447. +@@ -3364,7 +3492,7 @@ void __ieee80211_subif_start_xmit(struct
  448. + fast_tx = rcu_dereference(sta->fast_tx);
  449. +
  450. + if (fast_tx &&
  451. +- ieee80211_xmit_fast(sdata, dev, sta, fast_tx, skb))
  452. ++ ieee80211_xmit_fast(sdata, sta, fast_tx, skb))
  453. + goto out;
  454. + }
  455. +
  456. +--- a/include/net/mac80211.h
  457. ++++ b/include/net/mac80211.h
  458. +@@ -715,6 +715,7 @@ enum mac80211_tx_info_flags {
  459. + * frame (PS-Poll or uAPSD).
  460. + * @IEEE80211_TX_CTRL_RATE_INJECT: This frame is injected with rate information
  461. + * @IEEE80211_TX_CTRL_AMSDU: This frame is an A-MSDU frame
  462. ++ * @IEEE80211_TX_CTRL_FAST_XMIT: This frame is going through the fast_xmit path
  463. + *
  464. + * These flags are used in tx_info->control.flags.
  465. + */
  466. +@@ -723,6 +724,7 @@ enum mac80211_tx_control_flags {
  467. + IEEE80211_TX_CTRL_PS_RESPONSE = BIT(1),
  468. + IEEE80211_TX_CTRL_RATE_INJECT = BIT(2),
  469. + IEEE80211_TX_CTRL_AMSDU = BIT(3),
  470. ++ IEEE80211_TX_CTRL_FAST_XMIT = BIT(4),
  471. + };
  472. +
  473. + /*
  474. +--- a/net/mac80211/ieee80211_i.h
  475. ++++ b/net/mac80211/ieee80211_i.h
  476. +@@ -814,11 +814,13 @@ enum txq_info_flags {
  477. + * @def_flow: used as a fallback flow when a packet destined to @tin hashes to
  478. + * a fq_flow which is already owned by a different tin
  479. + * @def_cvars: codel vars for @def_flow
  480. ++ * @frags: used to keep fragments created after dequeue
  481. + */
  482. + struct txq_info {
  483. + struct fq_tin tin;
  484. + struct fq_flow def_flow;
  485. + struct codel_vars def_cvars;
  486. ++ struct sk_buff_head frags;
  487. + unsigned long flags;
  488. +
  489. + /* keep last! */
  490. diff --git a/package/kernel/mac80211/patches/346-mac80211-fix-sequence-number-assignment-for-PS-respo.patch b/package/kernel/mac80211/patches/346-mac80211-fix-sequence-number-assignment-for-PS-respo.patch
  491. deleted file mode 100644
  492. index a82d12f..0000000
  493. --- a/package/kernel/mac80211/patches/346-mac80211-fix-sequence-number-assignment-for-PS-respo.patch
  494. +++ /dev/null
  495. @@ -1,107 +0,0 @@
  496. -From: Felix Fietkau <nbd@nbd.name>
  497. -Date: Sun, 4 Sep 2016 17:46:24 +0200
  498. -Subject: [PATCH] mac80211: fix sequence number assignment for PS response
  499. - frames
  500. -
  501. -When using intermediate queues, sequence number allocation is deferred
  502. -until dequeue. This doesn't work for PS response frames, which bypass
  503. -those queues.
  504. -
  505. -Signed-off-by: Felix Fietkau <nbd@nbd.name>
  506. ----
  507. -
  508. ---- a/net/mac80211/tx.c
  509. -+++ b/net/mac80211/tx.c
  510. -@@ -792,6 +792,36 @@ static __le16 ieee80211_tx_next_seq(stru
  511. - return ret;
  512. - }
  513. -
  514. -+static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
  515. -+ struct ieee80211_vif *vif,
  516. -+ struct ieee80211_sta *pubsta,
  517. -+ struct sk_buff *skb)
  518. -+{
  519. -+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
  520. -+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
  521. -+ struct ieee80211_txq *txq = NULL;
  522. -+
  523. -+ if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) ||
  524. -+ (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
  525. -+ return NULL;
  526. -+
  527. -+ if (!ieee80211_is_data(hdr->frame_control))
  528. -+ return NULL;
  529. -+
  530. -+ if (pubsta) {
  531. -+ u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
  532. -+
  533. -+ txq = pubsta->txq[tid];
  534. -+ } else if (vif) {
  535. -+ txq = vif->txq;
  536. -+ }
  537. -+
  538. -+ if (!txq)
  539. -+ return NULL;
  540. -+
  541. -+ return to_txq_info(txq);
  542. -+}
  543. -+
  544. - static ieee80211_tx_result debug_noinline
  545. - ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
  546. - {
  547. -@@ -849,7 +879,8 @@ ieee80211_tx_h_sequence(struct ieee80211
  548. - tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
  549. - tx->sta->tx_stats.msdu[tid]++;
  550. -
  551. -- if (!tx->sta->sta.txq[0])
  552. -+ if (!ieee80211_get_txq(tx->local, info->control.vif, &tx->sta->sta,
  553. -+ tx->skb))
  554. - hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid);
  555. -
  556. - return TX_CONTINUE;
  557. -@@ -1238,36 +1269,6 @@ ieee80211_tx_prepare(struct ieee80211_su
  558. - return TX_CONTINUE;
  559. - }
  560. -
  561. --static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
  562. -- struct ieee80211_vif *vif,
  563. -- struct ieee80211_sta *pubsta,
  564. -- struct sk_buff *skb)
  565. --{
  566. -- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
  567. -- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
  568. -- struct ieee80211_txq *txq = NULL;
  569. --
  570. -- if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) ||
  571. -- (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
  572. -- return NULL;
  573. --
  574. -- if (!ieee80211_is_data(hdr->frame_control))
  575. -- return NULL;
  576. --
  577. -- if (pubsta) {
  578. -- u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
  579. --
  580. -- txq = pubsta->txq[tid];
  581. -- } else if (vif) {
  582. -- txq = vif->txq;
  583. -- }
  584. --
  585. -- if (!txq)
  586. -- return NULL;
  587. --
  588. -- return to_txq_info(txq);
  589. --}
  590. --
  591. - static void ieee80211_set_skb_enqueue_time(struct sk_buff *skb)
  592. - {
  593. - IEEE80211_SKB_CB(skb)->control.enqueue_time = codel_get_time();
  594. -@@ -3265,7 +3266,7 @@ static bool ieee80211_xmit_fast(struct i
  595. -
  596. - if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
  597. - *ieee80211_get_qos_ctl(hdr) = tid;
  598. -- if (!sta->sta.txq[0])
  599. -+ if (!ieee80211_get_txq(local, &sdata->vif, &sta->sta, skb))
  600. - hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
  601. - } else {
  602. - info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
  603. diff --git a/package/kernel/mac80211/patches/522-mac80211_configure_antenna_gain.patch b/package/kernel/mac80211/patches/522-mac80211_configure_antenna_gain.patch
  604. index c40598d..aba065e 100644
  605. --- a/package/kernel/mac80211/patches/522-mac80211_configure_antenna_gain.patch
  606. +++ b/package/kernel/mac80211/patches/522-mac80211_configure_antenna_gain.patch
  607. @@ -18,7 +18,7 @@
  608. const u8 *addr);
  609. --- a/include/net/mac80211.h
  610. +++ b/include/net/mac80211.h
  611. -@@ -1317,6 +1317,7 @@ enum ieee80211_smps_mode {
  612. +@@ -1319,6 +1319,7 @@ enum ieee80211_smps_mode {
  613. *
  614. * @power_level: requested transmit power (in dBm), backward compatibility
  615. * value only that is set to the minimum of all interfaces
  616. @@ -26,7 +26,7 @@
  617. *
  618. * @chandef: the channel definition to tune to
  619. * @radar_enabled: whether radar detection is enabled
  620. -@@ -1337,6 +1338,7 @@ enum ieee80211_smps_mode {
  621. +@@ -1339,6 +1340,7 @@ enum ieee80211_smps_mode {
  622. struct ieee80211_conf {
  623. u32 flags;
  624. int power_level, dynamic_ps_timeout;
  625. @@ -87,7 +87,7 @@
  626. CFG80211_TESTMODE_CMD(ieee80211_testmode_cmd)
  627. --- a/net/mac80211/ieee80211_i.h
  628. +++ b/net/mac80211/ieee80211_i.h
  629. -@@ -1338,6 +1338,7 @@ struct ieee80211_local {
  630. +@@ -1340,6 +1340,7 @@ struct ieee80211_local {
  631. int dynamic_ps_forced_timeout;
  632. int user_power_level; /* in dBm, for all interfaces */