0017-mac80211-update-ath10k-to-compat-wireless-2015-03-05.patch 1.0 MB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
76778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168716971707171717271737174717571767177717871797180718171827183718471857186718771887189719071917192719371947195719671977198719972007201720272037204720572067207720872097210721172127213721472157216721772187219722072217222722372247225722672277228722972307231723272337234723572367237723872397240724172427243724472457246724772487249725072517252725372547255725672577258725972607261726272637264726572667267726872697270727172727273727472757276727
77278727972807281728272837284728572867287728872897290729172927293729472957296729772987299730073017302730373047305730673077308730973107311731273137314731573167317731873197320732173227323732473257326732773287329733073317332733373347335733673377338733973407341734273437344734573467347734873497350735173527353735473557356735773587359736073617362736373647365736673677368736973707371737273737374737573767377737873797380738173827383738473857386738773887389739073917392739373947395739673977398739974007401740274037404740574067407740874097410741174127413741474157416741774187419742074217422742374247425742674277428742974307431743274337434743574367437743874397440744174427443744474457446744774487449745074517452745374547455745674577458745974607461746274637464746574667467746874697470747174727473747474757476747774787479748074817482748374847485748674877488748974907491749274937494749574967497749874997500750175027503750475057506750775087509751075117512751375147515751675177518751975207521752275237524752575267527752875297530753175327533753475357536753775387539754075417542754375447545754675477548754975507551755275537554755575567557755875597560756175627563756475657566756775687569757075717572757375747575757675777578757975807581758275837584758575867587758875897590759175927593759475957596759775987599760076017602760376047605760676077608760976107611761276137614761576167617761876197620762176227623762476257626762776287629763076317632763376347635763676377638763976407641764276437644764576467647764876497650765176527653765476557656765776587659766076617662766376647665766676677668766976707671767276737674767576767677767876797680768176827683768476857686768776887689769076917692769376947695769676977698769977007701770277037704770577067707770877097710771177127713771477157716771777187719772077217722772377247725772677277728772977307731773277337734773577367737773877397740774177427743774477457746774777487749775077517752775377547755775677577758775977607761776277637764776577667767776877697770777177727773777477757776777
77778777977807781778277837784778577867787778877897790779177927793779477957796779777987799780078017802780378047805780678077808780978107811781278137814781578167817781878197820782178227823782478257826782778287829783078317832783378347835783678377838783978407841784278437844784578467847784878497850785178527853785478557856785778587859786078617862786378647865786678677868786978707871787278737874787578767877787878797880788178827883788478857886788778887889789078917892789378947895789678977898789979007901790279037904790579067907790879097910791179127913791479157916791779187919792079217922792379247925792679277928792979307931793279337934793579367937793879397940794179427943794479457946794779487949795079517952795379547955795679577958795979607961796279637964796579667967796879697970797179727973797479757976797779787979798079817982798379847985798679877988798979907991799279937994799579967997799879998000800180028003800480058006800780088009801080118012801380148015801680178018801980208021802280238024802580268027802880298030803180328033803480358036803780388039804080418042804380448045804680478048804980508051805280538054805580568057805880598060806180628063806480658066806780688069807080718072807380748075807680778078807980808081808280838084808580868087808880898090809180928093809480958096809780988099810081018102810381048105810681078108810981108111811281138114811581168117811881198120812181228123812481258126812781288129813081318132813381348135813681378138813981408141814281438144814581468147814881498150815181528153815481558156815781588159816081618162816381648165816681678168816981708171817281738174817581768177817881798180818181828183818481858186818781888189819081918192819381948195819681978198819982008201820282038204820582068207820882098210821182128213821482158216821782188219822082218222822382248225822682278228822982308231823282338234823582368237823882398240824182428243824482458246824782488249825082518252825382548255825682578258825982608261826282638264826582668267826882698270827182728273827482758276827
78278827982808281828282838284828582868287828882898290829182928293829482958296829782988299830083018302830383048305830683078308830983108311831283138314831583168317831883198320832183228323832483258326832783288329833083318332833383348335833683378338833983408341834283438344834583468347834883498350835183528353835483558356835783588359836083618362836383648365836683678368836983708371837283738374837583768377837883798380838183828383838483858386838783888389839083918392839383948395839683978398839984008401840284038404840584068407840884098410841184128413841484158416841784188419842084218422842384248425842684278428842984308431843284338434843584368437843884398440844184428443844484458446844784488449845084518452845384548455845684578458845984608461846284638464846584668467846884698470847184728473847484758476847784788479848084818482848384848485848684878488848984908491849284938494849584968497849884998500850185028503850485058506850785088509851085118512851385148515851685178518851985208521852285238524852585268527852885298530853185328533853485358536853785388539854085418542854385448545854685478548854985508551855285538554855585568557855885598560856185628563856485658566856785688569857085718572857385748575857685778578857985808581858285838584858585868587858885898590859185928593859485958596859785988599860086018602860386048605860686078608860986108611861286138614861586168617861886198620862186228623862486258626862786288629863086318632863386348635863686378638863986408641864286438644864586468647864886498650865186528653865486558656865786588659866086618662866386648665866686678668866986708671867286738674867586768677867886798680868186828683868486858686868786888689869086918692869386948695869686978698869987008701870287038704870587068707870887098710871187128713871487158716871787188719872087218722872387248725872687278728872987308731873287338734873587368737873887398740874187428743874487458746874787488749875087518752875387548755875687578758875987608761876287638764876587668767876887698770877187728773877487758776877
78778877987808781878287838784878587868787878887898790879187928793879487958796879787988799880088018802880388048805880688078808880988108811881288138814881588168817881888198820882188228823882488258826882788288829883088318832883388348835883688378838883988408841884288438844884588468847884888498850885188528853885488558856885788588859886088618862886388648865886688678868886988708871887288738874887588768877887888798880888188828883888488858886888788888889889088918892889388948895889688978898889989008901890289038904890589068907890889098910891189128913891489158916891789188919892089218922892389248925892689278928892989308931893289338934893589368937893889398940894189428943894489458946894789488949895089518952895389548955895689578958895989608961896289638964896589668967896889698970897189728973897489758976897789788979898089818982898389848985898689878988898989908991899289938994899589968997899889999000900190029003900490059006900790089009901090119012901390149015901690179018901990209021902290239024902590269027902890299030903190329033903490359036903790389039904090419042904390449045904690479048904990509051905290539054905590569057905890599060906190629063906490659066906790689069907090719072907390749075907690779078907990809081908290839084908590869087908890899090909190929093909490959096909790989099910091019102910391049105910691079108910991109111911291139114911591169117911891199120912191229123912491259126912791289129913091319132913391349135913691379138913991409141914291439144914591469147914891499150915191529153915491559156915791589159916091619162916391649165916691679168916991709171917291739174917591769177917891799180918191829183918491859186918791889189919091919192919391949195919691979198919992009201920292039204920592069207920892099210921192129213921492159216921792189219922092219222922392249225922692279228922992309231923292339234923592369237923892399240924192429243924492459246924792489249925092519252925392549255925692579258925992609261926292639264926592669267926892699270927192729273927492759276927
79278927992809281928292839284928592869287928892899290929192929293929492959296929792989299930093019302930393049305930693079308930993109311931293139314931593169317931893199320932193229323932493259326932793289329933093319332933393349335933693379338933993409341934293439344934593469347934893499350935193529353935493559356935793589359936093619362936393649365936693679368936993709371937293739374937593769377937893799380938193829383938493859386938793889389939093919392939393949395939693979398939994009401940294039404940594069407940894099410941194129413941494159416941794189419942094219422942394249425942694279428942994309431943294339434943594369437943894399440944194429443944494459446944794489449945094519452945394549455945694579458945994609461946294639464946594669467946894699470947194729473947494759476947794789479948094819482948394849485948694879488948994909491949294939494949594969497949894999500950195029503950495059506950795089509951095119512951395149515951695179518951995209521952295239524952595269527952895299530953195329533953495359536953795389539954095419542954395449545954695479548954995509551955295539554955595569557955895599560956195629563956495659566956795689569957095719572957395749575957695779578957995809581958295839584958595869587958895899590959195929593959495959596959795989599960096019602960396049605960696079608960996109611961296139614961596169617961896199620962196229623962496259626962796289629963096319632963396349635963696379638963996409641964296439644964596469647964896499650965196529653965496559656965796589659966096619662966396649665966696679668966996709671967296739674967596769677967896799680968196829683968496859686968796889689969096919692969396949695969696979698969997009701970297039704970597069707970897099710971197129713971497159716971797189719972097219722972397249725972697279728972997309731973297339734973597369737973897399740974197429743974497459746974797489749975097519752975397549755975697579758975997609761976297639764976597669767976897699770977197729773977497759776977
79778977997809781978297839784978597869787978897899790979197929793979497959796979797989799980098019802980398049805980698079808980998109811981298139814981598169817981898199820982198229823982498259826982798289829983098319832983398349835983698379838983998409841984298439844984598469847984898499850985198529853985498559856985798589859986098619862986398649865986698679868986998709871987298739874987598769877987898799880988198829883988498859886988798889889989098919892989398949895989698979898989999009901990299039904990599069907990899099910991199129913991499159916991799189919992099219922992399249925992699279928992999309931993299339934993599369937993899399940994199429943994499459946994799489949995099519952995399549955995699579958995999609961996299639964996599669967996899699970997199729973997499759976997799789979998099819982998399849985998699879988998999909991999299939994999599969997999899991000010001100021000310004100051000610007100081000910010100111001210013100141001510016100171001810019100201002110022100231002410025100261002710028100291003010031100321003310034100351003610037100381003910040100411004210043100441004510046100471004810049100501005110052100531005410055100561005710058100591006010061100621006310064100651006610067100681006910070100711007210073100741007510076100771007810079100801008110082100831008410085100861008710088100891009010091100921009310094100951009610097100981009910100101011010210103101041010510106101071010810109101101011110112101131011410115101161011710118101191012010121101221012310124101251012610127101281012910130101311013210133101341013510136101371013810139101401014110142101431014410145101461014710148101491015010151101521015310154101551015610157101581015910160101611016210163101641016510166101671016810169101701017110172101731017410175101761017710178101791018010181101821018310184101851018610187101881018910190101911019210193101941019510196101971019810199102001020110202102031020410205102061020710208102091021010211102121021310214102151021610217102181021910220102211
02221022310224102251022610227102281022910230102311023210233102341023510236102371023810239102401024110242102431024410245102461024710248102491025010251102521025310254102551025610257102581025910260102611026210263102641026510266102671026810269102701027110272102731027410275102761027710278102791028010281102821028310284102851028610287102881028910290102911029210293102941029510296102971029810299103001030110302103031030410305103061030710308103091031010311103121031310314103151031610317103181031910320103211032210323103241032510326103271032810329103301033110332103331033410335103361033710338103391034010341103421034310344103451034610347103481034910350103511035210353103541035510356103571035810359103601036110362103631036410365103661036710368103691037010371103721037310374103751037610377103781037910380103811038210383103841038510386103871038810389103901039110392103931039410395103961039710398103991040010401104021040310404104051040610407104081040910410104111041210413104141041510416104171041810419104201042110422104231042410425104261042710428104291043010431104321043310434104351043610437104381043910440104411044210443104441044510446104471044810449104501045110452104531045410455104561045710458104591046010461104621046310464104651046610467104681046910470104711047210473104741047510476104771047810479104801048110482104831048410485104861048710488104891049010491104921049310494104951049610497104981049910500105011050210503105041050510506105071050810509105101051110512105131051410515105161051710518105191052010521105221052310524105251052610527105281052910530105311053210533105341053510536105371053810539105401054110542105431054410545105461054710548105491055010551105521055310554105551055610557105581055910560105611056210563105641056510566105671056810569105701057110572105731057410575105761057710578105791058010581105821058310584105851058610587105881058910590105911059210593105941059510596105971059810599106001060110602106031060410605106061060710608106091061010611106121061310614106151061610617106181061910620106211
06221062310624106251062610627106281062910630106311063210633106341063510636106371063810639106401064110642106431064410645106461064710648106491065010651106521065310654106551065610657106581065910660106611066210663106641066510666106671066810669106701067110672106731067410675106761067710678106791068010681106821068310684106851068610687106881068910690106911069210693106941069510696106971069810699107001070110702107031070410705107061070710708107091071010711107121071310714107151071610717107181071910720107211072210723107241072510726107271072810729107301073110732107331073410735107361073710738107391074010741107421074310744107451074610747107481074910750107511075210753107541075510756107571075810759107601076110762107631076410765107661076710768107691077010771107721077310774107751077610777107781077910780107811078210783107841078510786107871078810789107901079110792107931079410795107961079710798107991080010801108021080310804108051080610807108081080910810108111081210813108141081510816108171081810819108201082110822108231082410825108261082710828108291083010831108321083310834108351083610837108381083910840108411084210843108441084510846108471084810849108501085110852108531085410855108561085710858108591086010861108621086310864108651086610867108681086910870108711087210873108741087510876108771087810879108801088110882108831088410885108861088710888108891089010891108921089310894108951089610897108981089910900109011090210903109041090510906109071090810909109101091110912109131091410915109161091710918109191092010921109221092310924109251092610927109281092910930109311093210933109341093510936109371093810939109401094110942109431094410945109461094710948109491095010951109521095310954109551095610957109581095910960109611096210963109641096510966109671096810969109701097110972109731097410975109761097710978109791098010981109821098310984109851098610987109881098910990109911099210993109941099510996109971099810999110001100111002110031100411005110061100711008110091101011011110121101311014110151101611017110181101911020110211
10221102311024110251102611027110281102911030110311103211033110341103511036110371103811039110401104111042110431104411045110461104711048110491105011051110521105311054110551105611057110581105911060110611106211063110641106511066110671106811069110701107111072110731107411075110761107711078110791108011081110821108311084110851108611087110881108911090110911109211093110941109511096110971109811099111001110111102111031110411105111061110711108111091111011111111121111311114111151111611117111181111911120111211112211123111241112511126111271112811129111301113111132111331113411135111361113711138111391114011141111421114311144111451114611147111481114911150111511115211153111541115511156111571115811159111601116111162111631116411165111661116711168111691117011171111721117311174111751117611177111781117911180111811118211183111841118511186111871118811189111901119111192111931119411195111961119711198111991120011201112021120311204112051120611207112081120911210112111121211213112141121511216112171121811219112201122111222112231122411225112261122711228112291123011231112321123311234112351123611237112381123911240112411124211243112441124511246112471124811249112501125111252112531125411255112561125711258112591126011261112621126311264112651126611267112681126911270112711127211273112741127511276112771127811279112801128111282112831128411285112861128711288112891129011291112921129311294112951129611297112981129911300113011130211303113041130511306113071130811309113101131111312113131131411315113161131711318113191132011321113221132311324113251132611327113281132911330113311133211333113341133511336113371133811339113401134111342113431134411345113461134711348113491135011351113521135311354113551135611357113581135911360113611136211363113641136511366113671136811369113701137111372113731137411375113761137711378113791138011381113821138311384113851138611387113881138911390113911139211393113941139511396113971139811399114001140111402114031140411405114061140711408114091141011411114121141311414114151141611417114181141911420114211
14221142311424114251142611427114281142911430114311143211433114341143511436114371143811439114401144111442114431144411445114461144711448114491145011451114521145311454114551145611457114581145911460114611146211463114641146511466114671146811469114701147111472114731147411475114761147711478114791148011481114821148311484114851148611487114881148911490114911149211493114941149511496114971149811499115001150111502115031150411505115061150711508115091151011511115121151311514115151151611517115181151911520115211152211523115241152511526115271152811529115301153111532115331153411535115361153711538115391154011541115421154311544115451154611547115481154911550115511155211553115541155511556115571155811559115601156111562115631156411565115661156711568115691157011571115721157311574115751157611577115781157911580115811158211583115841158511586115871158811589115901159111592115931159411595115961159711598115991160011601116021160311604116051160611607116081160911610116111161211613116141161511616116171161811619116201162111622116231162411625116261162711628116291163011631116321163311634116351163611637116381163911640116411164211643116441164511646116471164811649116501165111652116531165411655116561165711658116591166011661116621166311664116651166611667116681166911670116711167211673116741167511676116771167811679116801168111682116831168411685116861168711688116891169011691116921169311694116951169611697116981169911700117011170211703117041170511706117071170811709117101171111712117131171411715117161171711718117191172011721117221172311724117251172611727117281172911730117311173211733117341173511736117371173811739117401174111742117431174411745117461174711748117491175011751117521175311754117551175611757117581175911760117611176211763117641176511766117671176811769117701177111772117731177411775117761177711778117791178011781117821178311784117851178611787117881178911790117911179211793117941179511796117971179811799118001180111802118031180411805118061180711808118091181011811118121181311814118151181611817118181181911820118211
18221182311824118251182611827118281182911830118311183211833118341183511836118371183811839118401184111842118431184411845118461184711848118491185011851118521185311854118551185611857118581185911860118611186211863118641186511866118671186811869118701187111872118731187411875118761187711878118791188011881118821188311884118851188611887118881188911890118911189211893118941189511896118971189811899119001190111902119031190411905119061190711908119091191011911119121191311914119151191611917119181191911920119211192211923119241192511926119271192811929119301193111932119331193411935119361193711938119391194011941119421194311944119451194611947119481194911950119511195211953119541195511956119571195811959119601196111962119631196411965119661196711968119691197011971119721197311974119751197611977119781197911980119811198211983119841198511986119871198811989119901199111992119931199411995119961199711998119991200012001120021200312004120051200612007120081200912010120111201212013120141201512016120171201812019120201202112022120231202412025120261202712028120291203012031120321203312034120351203612037120381203912040120411204212043120441204512046120471204812049120501205112052120531205412055120561205712058120591206012061120621206312064120651206612067120681206912070120711207212073120741207512076120771207812079120801208112082120831208412085120861208712088120891209012091120921209312094120951209612097120981209912100121011210212103121041210512106121071210812109121101211112112121131211412115121161211712118121191212012121121221212312124121251212612127121281212912130121311213212133121341213512136121371213812139121401214112142121431214412145121461214712148121491215012151121521215312154121551215612157121581215912160121611216212163121641216512166121671216812169121701217112172121731217412175121761217712178121791218012181121821218312184121851218612187121881218912190121911219212193121941219512196121971219812199122001220112202122031220412205122061220712208122091221012211122121221312214122151221612217122181221912220122211
22221222312224122251222612227122281222912230122311223212233122341223512236122371223812239122401224112242122431224412245122461224712248122491225012251122521225312254122551225612257122581225912260122611226212263122641226512266122671226812269122701227112272122731227412275122761227712278122791228012281122821228312284122851228612287122881228912290122911229212293122941229512296122971229812299123001230112302123031230412305123061230712308123091231012311123121231312314123151231612317123181231912320123211232212323123241232512326123271232812329123301233112332123331233412335123361233712338123391234012341123421234312344123451234612347123481234912350123511235212353123541235512356123571235812359123601236112362123631236412365123661236712368123691237012371123721237312374123751237612377123781237912380123811238212383123841238512386123871238812389123901239112392123931239412395123961239712398123991240012401124021240312404124051240612407124081240912410124111241212413124141241512416124171241812419124201242112422124231242412425124261242712428124291243012431124321243312434124351243612437124381243912440124411244212443124441244512446124471244812449124501245112452124531245412455124561245712458124591246012461124621246312464124651246612467124681246912470124711247212473124741247512476124771247812479124801248112482124831248412485124861248712488124891249012491124921249312494124951249612497124981249912500125011250212503125041250512506125071250812509125101251112512125131251412515125161251712518125191252012521125221252312524125251252612527125281252912530125311253212533125341253512536125371253812539125401254112542125431254412545125461254712548125491255012551125521255312554125551255612557125581255912560125611256212563125641256512566125671256812569125701257112572125731257412575125761257712578125791258012581125821258312584125851258612587125881258912590125911259212593125941259512596125971259812599126001260112602126031260412605126061260712608126091261012611126121261312614126151261612617126181261912620126211
26221262312624126251262612627126281262912630126311263212633126341263512636126371263812639126401264112642126431264412645126461264712648126491265012651126521265312654126551265612657126581265912660126611266212663126641266512666126671266812669126701267112672126731267412675126761267712678126791268012681126821268312684126851268612687126881268912690126911269212693126941269512696126971269812699127001270112702127031270412705127061270712708127091271012711127121271312714127151271612717127181271912720127211272212723127241272512726127271272812729127301273112732127331273412735127361273712738127391274012741127421274312744127451274612747127481274912750127511275212753127541275512756127571275812759127601276112762127631276412765127661276712768127691277012771127721277312774127751277612777127781277912780127811278212783127841278512786127871278812789127901279112792127931279412795127961279712798127991280012801128021280312804128051280612807128081280912810128111281212813128141281512816128171281812819128201282112822128231282412825128261282712828128291283012831128321283312834128351283612837128381283912840128411284212843128441284512846128471284812849128501285112852128531285412855128561285712858128591286012861128621286312864128651286612867128681286912870128711287212873128741287512876128771287812879128801288112882128831288412885128861288712888128891289012891128921289312894128951289612897128981289912900129011290212903129041290512906129071290812909129101291112912129131291412915129161291712918129191292012921129221292312924129251292612927129281292912930129311293212933129341293512936129371293812939129401294112942129431294412945129461294712948129491295012951129521295312954129551295612957129581295912960129611296212963129641296512966129671296812969129701297112972129731297412975129761297712978129791298012981129821298312984129851298612987129881298912990129911299212993129941299512996129971299812999130001300113002130031300413005130061300713008130091301013011130121301313014130151301613017130181301913020130211
30221302313024130251302613027130281302913030130311303213033130341303513036130371303813039130401304113042130431304413045130461304713048130491305013051130521305313054130551305613057130581305913060130611306213063130641306513066130671306813069130701307113072130731307413075130761307713078130791308013081130821308313084130851308613087130881308913090130911309213093130941309513096130971309813099131001310113102131031310413105131061310713108131091311013111131121311313114131151311613117131181311913120131211312213123131241312513126131271312813129131301313113132131331313413135131361313713138131391314013141131421314313144131451314613147131481314913150131511315213153131541315513156131571315813159131601316113162131631316413165131661316713168131691317013171131721317313174131751317613177131781317913180131811318213183131841318513186131871318813189131901319113192131931319413195131961319713198131991320013201132021320313204132051320613207132081320913210132111321213213132141321513216132171321813219132201322113222132231322413225132261322713228132291323013231132321323313234132351323613237132381323913240132411324213243132441324513246132471324813249132501325113252132531325413255132561325713258132591326013261132621326313264132651326613267132681326913270132711327213273132741327513276132771327813279132801328113282132831328413285132861328713288132891329013291132921329313294132951329613297132981329913300133011330213303133041330513306133071330813309133101331113312133131331413315133161331713318133191332013321133221332313324133251332613327133281332913330133311333213333133341333513336133371333813339133401334113342133431334413345133461334713348133491335013351133521335313354133551335613357133581335913360133611336213363133641336513366133671336813369133701337113372133731337413375133761337713378133791338013381133821338313384133851338613387133881338913390133911339213393133941339513396133971339813399134001340113402134031340413405134061340713408134091341013411134121341313414134151341613417134181341913420134211
34221342313424134251342613427134281342913430134311343213433134341343513436134371343813439134401344113442134431344413445134461344713448134491345013451134521345313454134551345613457134581345913460134611346213463134641346513466134671346813469134701347113472134731347413475134761347713478134791348013481134821348313484134851348613487134881348913490134911349213493134941349513496134971349813499135001350113502135031350413505135061350713508135091351013511135121351313514135151351613517135181351913520135211352213523135241352513526135271352813529135301353113532135331353413535135361353713538135391354013541135421354313544135451354613547135481354913550135511355213553135541355513556135571355813559135601356113562135631356413565135661356713568135691357013571135721357313574135751357613577135781357913580135811358213583135841358513586135871358813589135901359113592135931359413595135961359713598135991360013601136021360313604136051360613607136081360913610136111361213613136141361513616136171361813619136201362113622136231362413625136261362713628136291363013631136321363313634136351363613637136381363913640136411364213643136441364513646136471364813649136501365113652136531365413655136561365713658136591366013661136621366313664136651366613667136681366913670136711367213673136741367513676136771367813679136801368113682136831368413685136861368713688136891369013691136921369313694136951369613697136981369913700137011370213703137041370513706137071370813709137101371113712137131371413715137161371713718137191372013721137221372313724137251372613727137281372913730137311373213733137341373513736137371373813739137401374113742137431374413745137461374713748137491375013751137521375313754137551375613757137581375913760137611376213763137641376513766137671376813769137701377113772137731377413775137761377713778137791378013781137821378313784137851378613787137881378913790137911379213793137941379513796137971379813799138001380113802138031380413805138061380713808138091381013811138121381313814138151381613817138181381913820138211
38221382313824138251382613827138281382913830138311383213833138341383513836138371383813839138401384113842138431384413845138461384713848138491385013851138521385313854138551385613857138581385913860138611386213863138641386513866138671386813869138701387113872138731387413875138761387713878138791388013881138821388313884138851388613887138881388913890138911389213893138941389513896138971389813899139001390113902139031390413905139061390713908139091391013911139121391313914139151391613917139181391913920139211392213923139241392513926139271392813929139301393113932139331393413935139361393713938139391394013941139421394313944139451394613947139481394913950139511395213953139541395513956139571395813959139601396113962139631396413965139661396713968139691397013971139721397313974139751397613977139781397913980139811398213983139841398513986139871398813989139901399113992139931399413995139961399713998139991400014001140021400314004140051400614007140081400914010140111401214013140141401514016140171401814019140201402114022140231402414025140261402714028140291403014031140321403314034140351403614037140381403914040140411404214043140441404514046140471404814049140501405114052140531405414055140561405714058140591406014061140621406314064140651406614067140681406914070140711407214073140741407514076140771407814079140801408114082140831408414085140861408714088140891409014091140921409314094140951409614097140981409914100141011410214103141041410514106141071410814109141101411114112141131411414115141161411714118141191412014121141221412314124141251412614127141281412914130141311413214133141341413514136141371413814139141401414114142141431414414145141461414714148141491415014151141521415314154141551415614157141581415914160141611416214163141641416514166141671416814169141701417114172141731417414175141761417714178141791418014181141821418314184141851418614187141881418914190141911419214193141941419514196141971419814199142001420114202142031420414205142061420714208142091421014211142121421314214142151421614217142181421914220142211
42221422314224142251422614227142281422914230142311423214233142341423514236142371423814239142401424114242142431424414245142461424714248142491425014251142521425314254142551425614257142581425914260142611426214263142641426514266142671426814269142701427114272142731427414275142761427714278142791428014281142821428314284142851428614287142881428914290142911429214293142941429514296142971429814299143001430114302143031430414305143061430714308143091431014311143121431314314143151431614317143181431914320143211432214323143241432514326143271432814329143301433114332143331433414335143361433714338143391434014341143421434314344143451434614347143481434914350143511435214353143541435514356143571435814359143601436114362143631436414365143661436714368143691437014371143721437314374143751437614377143781437914380143811438214383143841438514386143871438814389143901439114392143931439414395143961439714398143991440014401144021440314404144051440614407144081440914410144111441214413144141441514416144171441814419144201442114422144231442414425144261442714428144291443014431144321443314434144351443614437144381443914440144411444214443144441444514446144471444814449144501445114452144531445414455144561445714458144591446014461144621446314464144651446614467144681446914470144711447214473144741447514476144771447814479144801448114482144831448414485144861448714488144891449014491144921449314494144951449614497144981449914500145011450214503145041450514506145071450814509145101451114512145131451414515145161451714518145191452014521145221452314524145251452614527145281452914530145311453214533145341453514536145371453814539145401454114542145431454414545145461454714548145491455014551145521455314554145551455614557145581455914560145611456214563145641456514566145671456814569145701457114572145731457414575145761457714578145791458014581145821458314584145851458614587145881458914590145911459214593145941459514596145971459814599146001460114602146031460414605146061460714608146091461014611146121461314614146151461614617146181461914620146211
46221462314624146251462614627146281462914630146311463214633146341463514636146371463814639146401464114642146431464414645146461464714648146491465014651146521465314654146551465614657146581465914660146611466214663146641466514666146671466814669146701467114672146731467414675146761467714678146791468014681146821468314684146851468614687146881468914690146911469214693146941469514696146971469814699147001470114702147031470414705147061470714708147091471014711147121471314714147151471614717147181471914720147211472214723147241472514726147271472814729147301473114732147331473414735147361473714738147391474014741147421474314744147451474614747147481474914750147511475214753147541475514756147571475814759147601476114762147631476414765147661476714768147691477014771147721477314774147751477614777147781477914780147811478214783147841478514786147871478814789147901479114792147931479414795147961479714798147991480014801148021480314804148051480614807148081480914810148111481214813148141481514816148171481814819148201482114822148231482414825148261482714828148291483014831148321483314834148351483614837148381483914840148411484214843148441484514846148471484814849148501485114852148531485414855148561485714858148591486014861148621486314864148651486614867148681486914870148711487214873148741487514876148771487814879148801488114882148831488414885148861488714888148891489014891148921489314894148951489614897148981489914900149011490214903149041490514906149071490814909149101491114912149131491414915149161491714918149191492014921149221492314924149251492614927149281492914930149311493214933149341493514936149371493814939149401494114942149431494414945149461494714948149491495014951149521495314954149551495614957149581495914960149611496214963149641496514966149671496814969149701497114972149731497414975149761497714978149791498014981149821498314984149851498614987149881498914990149911499214993149941499514996149971499814999150001500115002150031500415005150061500715008150091501015011150121501315014150151501615017150181501915020150211
50221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211
54221542315424154251542615427154281542915430154311543215433154341543515436154371543815439154401544115442154431544415445154461544715448154491545015451154521545315454154551545615457154581545915460154611546215463154641546515466154671546815469154701547115472154731547415475154761547715478154791548015481154821548315484154851548615487154881548915490154911549215493154941549515496154971549815499155001550115502155031550415505155061550715508155091551015511155121551315514155151551615517155181551915520155211552215523155241552515526155271552815529155301553115532155331553415535155361553715538155391554015541155421554315544155451554615547155481554915550155511555215553155541555515556155571555815559155601556115562155631556415565155661556715568155691557015571155721557315574155751557615577155781557915580155811558215583155841558515586155871558815589155901559115592155931559415595155961559715598155991560015601156021560315604156051560615607156081560915610156111561215613156141561515616156171561815619156201562115622156231562415625156261562715628156291563015631156321563315634156351563615637156381563915640156411564215643156441564515646156471564815649156501565115652156531565415655156561565715658156591566015661156621566315664156651566615667156681566915670156711567215673156741567515676156771567815679156801568115682156831568415685156861568715688156891569015691156921569315694156951569615697156981569915700157011570215703157041570515706157071570815709157101571115712157131571415715157161571715718157191572015721157221572315724157251572615727157281572915730157311573215733157341573515736157371573815739157401574115742157431574415745157461574715748157491575015751157521575315754157551575615757157581575915760157611576215763157641576515766157671576815769157701577115772157731577415775157761577715778157791578015781157821578315784157851578615787157881578915790157911579215793157941579515796157971579815799158001580115802158031580415805158061580715808158091581015811158121581315814158151581615817158181581915820158211
58221582315824158251582615827158281582915830158311583215833158341583515836158371583815839158401584115842158431584415845158461584715848158491585015851158521585315854158551585615857158581585915860158611586215863158641586515866158671586815869158701587115872158731587415875158761587715878158791588015881158821588315884158851588615887158881588915890158911589215893158941589515896158971589815899159001590115902159031590415905159061590715908159091591015911159121591315914159151591615917159181591915920159211592215923159241592515926159271592815929159301593115932159331593415935159361593715938159391594015941159421594315944159451594615947159481594915950159511595215953159541595515956159571595815959159601596115962159631596415965159661596715968159691597015971159721597315974159751597615977159781597915980159811598215983159841598515986159871598815989159901599115992159931599415995159961599715998159991600016001160021600316004160051600616007160081600916010160111601216013160141601516016160171601816019160201602116022160231602416025160261602716028160291603016031160321603316034160351603616037160381603916040160411604216043160441604516046160471604816049160501605116052160531605416055160561605716058160591606016061160621606316064160651606616067160681606916070160711607216073160741607516076160771607816079160801608116082160831608416085160861608716088160891609016091160921609316094160951609616097160981609916100161011610216103161041610516106161071610816109161101611116112161131611416115161161611716118161191612016121161221612316124161251612616127161281612916130161311613216133161341613516136161371613816139161401614116142161431614416145161461614716148161491615016151161521615316154161551615616157161581615916160161611616216163161641616516166161671616816169161701617116172161731617416175161761617716178161791618016181161821618316184161851618616187161881618916190161911619216193161941619516196161971619816199162001620116202162031620416205162061620716208162091621016211162121621316214162151621616217162181621916220162211
62221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211
66221662316624166251662616627166281662916630166311663216633166341663516636166371663816639166401664116642166431664416645166461664716648166491665016651166521665316654166551665616657166581665916660166611666216663166641666516666166671666816669166701667116672166731667416675166761667716678166791668016681166821668316684166851668616687166881668916690166911669216693166941669516696166971669816699167001670116702167031670416705167061670716708167091671016711167121671316714167151671616717167181671916720167211672216723167241672516726167271672816729167301673116732167331673416735167361673716738167391674016741167421674316744167451674616747167481674916750167511675216753167541675516756167571675816759167601676116762167631676416765167661676716768167691677016771167721677316774167751677616777167781677916780167811678216783167841678516786167871678816789167901679116792167931679416795167961679716798167991680016801168021680316804168051680616807168081680916810168111681216813168141681516816168171681816819168201682116822168231682416825168261682716828168291683016831168321683316834168351683616837168381683916840168411684216843168441684516846168471684816849168501685116852168531685416855168561685716858168591686016861168621686316864168651686616867168681686916870168711687216873168741687516876168771687816879168801688116882168831688416885168861688716888168891689016891168921689316894168951689616897168981689916900169011690216903169041690516906169071690816909169101691116912169131691416915169161691716918169191692016921169221692316924169251692616927169281692916930169311693216933169341693516936169371693816939169401694116942169431694416945169461694716948169491695016951169521695316954169551695616957169581695916960169611696216963169641696516966169671696816969169701697116972169731697416975169761697716978169791698016981169821698316984169851698616987169881698916990169911699216993169941699516996169971699816999170001700117002170031700417005170061700717008170091701017011170121701317014170151701617017170181701917020170211
70221702317024170251702617027170281702917030170311703217033170341703517036170371703817039170401704117042170431704417045170461704717048170491705017051170521705317054170551705617057170581705917060170611706217063170641706517066170671706817069170701707117072170731707417075170761707717078170791708017081170821708317084170851708617087170881708917090170911709217093170941709517096170971709817099171001710117102171031710417105171061710717108171091711017111171121711317114171151711617117171181711917120171211712217123171241712517126171271712817129171301713117132171331713417135171361713717138171391714017141171421714317144171451714617147171481714917150171511715217153171541715517156171571715817159171601716117162171631716417165171661716717168171691717017171171721717317174171751717617177171781717917180171811718217183171841718517186171871718817189171901719117192171931719417195171961719717198171991720017201172021720317204172051720617207172081720917210172111721217213172141721517216172171721817219172201722117222172231722417225172261722717228172291723017231172321723317234172351723617237172381723917240172411724217243172441724517246172471724817249172501725117252172531725417255172561725717258172591726017261172621726317264172651726617267172681726917270172711727217273172741727517276172771727817279172801728117282172831728417285172861728717288172891729017291172921729317294172951729617297172981729917300173011730217303173041730517306173071730817309173101731117312173131731417315173161731717318173191732017321173221732317324173251732617327173281732917330173311733217333173341733517336173371733817339173401734117342173431734417345173461734717348173491735017351173521735317354173551735617357173581735917360173611736217363173641736517366173671736817369173701737117372173731737417375173761737717378173791738017381173821738317384173851738617387173881738917390173911739217393173941739517396173971739817399174001740117402174031740417405174061740717408174091741017411174121741317414174151741617417174181741917420174211
74221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211
78221782317824178251782617827178281782917830178311783217833178341783517836178371783817839178401784117842178431784417845178461784717848178491785017851178521785317854178551785617857178581785917860178611786217863178641786517866178671786817869178701787117872178731787417875178761787717878178791788017881178821788317884178851788617887178881788917890178911789217893178941789517896178971789817899179001790117902179031790417905179061790717908179091791017911179121791317914179151791617917179181791917920179211792217923179241792517926179271792817929179301793117932179331793417935179361793717938179391794017941179421794317944179451794617947179481794917950179511795217953179541795517956179571795817959179601796117962179631796417965179661796717968179691797017971179721797317974179751797617977179781797917980179811798217983179841798517986179871798817989179901799117992179931799417995179961799717998179991800018001180021800318004180051800618007180081800918010180111801218013180141801518016180171801818019180201802118022180231802418025180261802718028180291803018031180321803318034180351803618037180381803918040180411804218043180441804518046180471804818049180501805118052180531805418055180561805718058180591806018061180621806318064180651806618067180681806918070180711807218073180741807518076180771807818079180801808118082180831808418085180861808718088180891809018091180921809318094180951809618097180981809918100181011810218103181041810518106181071810818109181101811118112181131811418115181161811718118181191812018121181221812318124181251812618127181281812918130181311813218133181341813518136181371813818139181401814118142181431814418145181461814718148181491815018151181521815318154181551815618157181581815918160181611816218163181641816518166181671816818169181701817118172181731817418175181761817718178181791818018181181821818318184181851818618187181881818918190181911819218193181941819518196181971819818199182001820118202182031820418205182061820718208182091821018211182121821318214182151821618217182181821918220182211
82221822318224182251822618227182281822918230182311823218233182341823518236182371823818239182401824118242182431824418245182461824718248182491825018251182521825318254182551825618257182581825918260182611826218263182641826518266182671826818269182701827118272182731827418275182761827718278182791828018281182821828318284182851828618287182881828918290182911829218293182941829518296182971829818299183001830118302183031830418305183061830718308183091831018311183121831318314183151831618317183181831918320183211832218323183241832518326183271832818329183301833118332183331833418335183361833718338183391834018341183421834318344183451834618347183481834918350183511835218353183541835518356183571835818359183601836118362183631836418365183661836718368183691837018371183721837318374183751837618377183781837918380183811838218383183841838518386183871838818389183901839118392183931839418395183961839718398183991840018401184021840318404184051840618407184081840918410184111841218413184141841518416184171841818419184201842118422184231842418425184261842718428184291843018431184321843318434184351843618437184381843918440184411844218443184441844518446184471844818449184501845118452184531845418455184561845718458184591846018461184621846318464184651846618467184681846918470184711847218473184741847518476184771847818479184801848118482184831848418485184861848718488184891849018491184921849318494184951849618497184981849918500185011850218503185041850518506185071850818509185101851118512185131851418515185161851718518185191852018521185221852318524185251852618527185281852918530185311853218533185341853518536185371853818539185401854118542185431854418545185461854718548185491855018551185521855318554185551855618557185581855918560185611856218563185641856518566185671856818569185701857118572185731857418575185761857718578185791858018581185821858318584185851858618587185881858918590185911859218593185941859518596185971859818599186001860118602186031860418605186061860718608186091861018611186121861318614186151861618617186181861918620186211
86221862318624186251862618627186281862918630186311863218633186341863518636186371863818639186401864118642186431864418645186461864718648186491865018651186521865318654186551865618657186581865918660186611866218663186641866518666186671866818669186701867118672186731867418675186761867718678186791868018681186821868318684186851868618687186881868918690186911869218693186941869518696186971869818699187001870118702187031870418705187061870718708187091871018711187121871318714187151871618717187181871918720187211872218723187241872518726187271872818729187301873118732187331873418735187361873718738187391874018741187421874318744187451874618747187481874918750187511875218753187541875518756187571875818759187601876118762187631876418765187661876718768187691877018771187721877318774187751877618777187781877918780187811878218783187841878518786187871878818789187901879118792187931879418795187961879718798187991880018801188021880318804188051880618807188081880918810188111881218813188141881518816188171881818819188201882118822188231882418825188261882718828188291883018831188321883318834188351883618837188381883918840188411884218843188441884518846188471884818849188501885118852188531885418855188561885718858188591886018861188621886318864188651886618867188681886918870188711887218873188741887518876188771887818879188801888118882188831888418885188861888718888188891889018891188921889318894188951889618897188981889918900189011890218903189041890518906189071890818909189101891118912189131891418915189161891718918189191892018921189221892318924189251892618927189281892918930189311893218933189341893518936189371893818939189401894118942189431894418945189461894718948189491895018951189521895318954189551895618957189581895918960189611896218963189641896518966189671896818969189701897118972189731897418975189761897718978189791898018981189821898318984189851898618987189881898918990189911899218993189941899518996189971899818999190001900119002190031900419005190061900719008190091901019011190121901319014190151901619017190181901919020190211
90221902319024190251902619027190281902919030190311903219033190341903519036190371903819039190401904119042190431904419045190461904719048190491905019051190521905319054190551905619057190581905919060190611906219063190641906519066190671906819069190701907119072190731907419075190761907719078190791908019081190821908319084190851908619087190881908919090190911909219093190941909519096190971909819099191001910119102191031910419105191061910719108191091911019111191121911319114191151911619117191181911919120191211912219123191241912519126191271912819129191301913119132191331913419135191361913719138191391914019141191421914319144191451914619147191481914919150191511915219153191541915519156191571915819159191601916119162191631916419165191661916719168191691917019171191721917319174191751917619177191781917919180191811918219183191841918519186191871918819189191901919119192191931919419195191961919719198191991920019201192021920319204192051920619207192081920919210192111921219213192141921519216192171921819219192201922119222192231922419225192261922719228192291923019231192321923319234192351923619237192381923919240192411924219243192441924519246192471924819249192501925119252192531925419255192561925719258192591926019261192621926319264192651926619267192681926919270192711927219273192741927519276192771927819279192801928119282192831928419285192861928719288192891929019291192921929319294192951929619297192981929919300193011930219303193041930519306193071930819309193101931119312193131931419315193161931719318193191932019321193221932319324193251932619327193281932919330193311933219333193341933519336193371933819339193401934119342193431934419345193461934719348193491935019351193521935319354193551935619357193581935919360193611936219363193641936519366193671936819369193701937119372193731937419375193761937719378193791938019381193821938319384193851938619387193881938919390193911939219393193941939519396193971939819399194001940119402194031940419405194061940719408194091941019411194121941319414194151941619417194181941919420194211
94221942319424194251942619427194281942919430194311943219433194341943519436194371943819439194401944119442194431944419445194461944719448194491945019451194521945319454194551945619457194581945919460194611946219463194641946519466194671946819469194701947119472194731947419475194761947719478194791948019481194821948319484194851948619487194881948919490194911949219493194941949519496194971949819499195001950119502195031950419505195061950719508195091951019511195121951319514195151951619517195181951919520195211952219523195241952519526195271952819529195301953119532195331953419535195361953719538195391954019541195421954319544195451954619547195481954919550195511955219553195541955519556195571955819559195601956119562195631956419565195661956719568195691957019571195721957319574195751957619577195781957919580195811958219583195841958519586195871958819589195901959119592195931959419595195961959719598195991960019601196021960319604196051960619607196081960919610196111961219613196141961519616196171961819619196201962119622196231962419625196261962719628196291963019631196321963319634196351963619637196381963919640196411964219643196441964519646196471964819649196501965119652196531965419655196561965719658196591966019661196621966319664196651966619667196681966919670196711967219673196741967519676196771967819679196801968119682196831968419685196861968719688196891969019691196921969319694196951969619697196981969919700197011970219703197041970519706197071970819709197101971119712197131971419715197161971719718197191972019721197221972319724197251972619727197281972919730197311973219733197341973519736197371973819739197401974119742197431974419745197461974719748197491975019751197521975319754197551975619757197581975919760197611976219763197641976519766197671976819769197701977119772197731977419775197761977719778197791978019781197821978319784197851978619787197881978919790197911979219793197941979519796197971979819799198001980119802198031980419805198061980719808198091981019811198121981319814198151981619817198181981919820198211
98221982319824198251982619827198281982919830198311983219833198341983519836198371983819839198401984119842198431984419845198461984719848198491985019851198521985319854198551985619857198581985919860198611986219863198641986519866198671986819869198701987119872198731987419875198761987719878198791988019881198821988319884198851988619887198881988919890198911989219893198941989519896198971989819899199001990119902199031990419905199061990719908199091991019911199121991319914199151991619917199181991919920199211992219923199241992519926199271992819929199301993119932199331993419935199361993719938199391994019941199421994319944199451994619947199481994919950199511995219953199541995519956199571995819959199601996119962199631996419965199661996719968199691997019971199721997319974199751997619977199781997919980199811998219983199841998519986199871998819989199901999119992199931999419995199961999719998199992000020001200022000320004200052000620007200082000920010200112001220013200142001520016200172001820019200202002120022200232002420025200262002720028200292003020031200322003320034200352003620037200382003920040200412004220043200442004520046200472004820049200502005120052200532005420055200562005720058200592006020061200622006320064200652006620067200682006920070200712007220073200742007520076200772007820079200802008120082200832008420085200862008720088200892009020091200922009320094200952009620097200982009920100201012010220103201042010520106201072010820109201102011120112201132011420115201162011720118201192012020121201222012320124201252012620127201282012920130201312013220133201342013520136201372013820139201402014120142201432014420145201462014720148201492015020151201522015320154201552015620157201582015920160201612016220163201642016520166201672016820169201702017120172201732017420175201762017720178201792018020181201822018320184201852018620187201882018920190201912019220193201942019520196201972019820199202002020120202202032020420205202062020720208202092021020211202122021320214202152021620217202182021920220202212
02222022320224202252022620227202282022920230202312023220233202342023520236202372023820239202402024120242202432024420245202462024720248202492025020251202522025320254202552025620257202582025920260202612026220263202642026520266202672026820269202702027120272202732027420275202762027720278202792028020281202822028320284202852028620287202882028920290202912029220293202942029520296202972029820299203002030120302203032030420305203062030720308203092031020311203122031320314203152031620317203182031920320203212032220323203242032520326203272032820329203302033120332203332033420335203362033720338203392034020341203422034320344203452034620347203482034920350203512035220353203542035520356203572035820359203602036120362203632036420365203662036720368203692037020371203722037320374203752037620377203782037920380203812038220383203842038520386203872038820389203902039120392203932039420395203962039720398203992040020401204022040320404204052040620407204082040920410204112041220413204142041520416204172041820419204202042120422204232042420425204262042720428204292043020431204322043320434204352043620437204382043920440204412044220443204442044520446204472044820449204502045120452204532045420455204562045720458204592046020461204622046320464204652046620467204682046920470204712047220473204742047520476204772047820479204802048120482204832048420485204862048720488204892049020491204922049320494204952049620497204982049920500205012050220503205042050520506205072050820509205102051120512205132051420515205162051720518205192052020521205222052320524205252052620527205282052920530205312053220533205342053520536205372053820539205402054120542205432054420545205462054720548205492055020551205522055320554205552055620557205582055920560205612056220563205642056520566205672056820569205702057120572205732057420575205762057720578205792058020581205822058320584205852058620587205882058920590205912059220593205942059520596205972059820599206002060120602206032060420605206062060720608206092061020611206122061320614206152061620617206182061920620206212
06222062320624206252062620627206282062920630206312063220633206342063520636206372063820639206402064120642206432064420645206462064720648206492065020651206522065320654206552065620657206582065920660206612066220663206642066520666206672066820669206702067120672206732067420675206762067720678206792068020681206822068320684206852068620687206882068920690206912069220693206942069520696206972069820699207002070120702207032070420705207062070720708207092071020711207122071320714207152071620717207182071920720207212072220723207242072520726207272072820729207302073120732207332073420735207362073720738207392074020741207422074320744207452074620747207482074920750207512075220753207542075520756207572075820759207602076120762207632076420765207662076720768207692077020771207722077320774207752077620777207782077920780207812078220783207842078520786207872078820789207902079120792207932079420795207962079720798207992080020801208022080320804208052080620807208082080920810208112081220813208142081520816208172081820819208202082120822208232082420825208262082720828208292083020831208322083320834208352083620837208382083920840208412084220843208442084520846208472084820849208502085120852208532085420855208562085720858208592086020861208622086320864208652086620867208682086920870208712087220873208742087520876208772087820879208802088120882208832088420885208862088720888208892089020891208922089320894208952089620897208982089920900209012090220903209042090520906209072090820909209102091120912209132091420915209162091720918209192092020921209222092320924209252092620927209282092920930209312093220933209342093520936209372093820939209402094120942209432094420945209462094720948209492095020951209522095320954209552095620957209582095920960209612096220963209642096520966209672096820969209702097120972209732097420975209762097720978209792098020981209822098320984209852098620987209882098920990209912099220993209942099520996209972099820999210002100121002210032100421005210062100721008210092101021011210122101321014210152101621017210182101921020210212
10222102321024210252102621027210282102921030210312103221033210342103521036210372103821039210402104121042210432104421045210462104721048210492105021051210522105321054210552105621057210582105921060210612106221063210642106521066210672106821069210702107121072210732107421075210762107721078210792108021081210822108321084210852108621087210882108921090210912109221093210942109521096210972109821099211002110121102211032110421105211062110721108211092111021111211122111321114211152111621117211182111921120211212112221123211242112521126211272112821129211302113121132211332113421135211362113721138211392114021141211422114321144211452114621147211482114921150211512115221153211542115521156211572115821159211602116121162211632116421165211662116721168211692117021171211722117321174211752117621177211782117921180211812118221183211842118521186211872118821189211902119121192211932119421195211962119721198211992120021201212022120321204212052120621207212082120921210212112121221213212142121521216212172121821219212202122121222212232122421225212262122721228212292123021231212322123321234212352123621237212382123921240212412124221243212442124521246212472124821249212502125121252212532125421255212562125721258212592126021261212622126321264212652126621267212682126921270212712127221273212742127521276212772127821279212802128121282212832128421285212862128721288212892129021291212922129321294212952129621297212982129921300213012130221303213042130521306213072130821309213102131121312213132131421315213162131721318213192132021321213222132321324213252132621327213282132921330213312133221333213342133521336213372133821339213402134121342213432134421345213462134721348213492135021351213522135321354213552135621357213582135921360213612136221363213642136521366213672136821369213702137121372213732137421375213762137721378213792138021381213822138321384213852138621387213882138921390213912139221393213942139521396213972139821399214002140121402214032140421405214062140721408214092141021411214122141321414214152141621417214182141921420214212
14222142321424214252142621427214282142921430214312143221433214342143521436214372143821439214402144121442214432144421445214462144721448214492145021451214522145321454214552145621457214582145921460214612146221463214642146521466214672146821469214702147121472214732147421475214762147721478214792148021481214822148321484214852148621487214882148921490214912149221493214942149521496214972149821499215002150121502215032150421505215062150721508215092151021511215122151321514215152151621517215182151921520215212152221523215242152521526215272152821529215302153121532215332153421535215362153721538215392154021541215422154321544215452154621547215482154921550215512155221553215542155521556215572155821559215602156121562215632156421565215662156721568215692157021571215722157321574215752157621577215782157921580215812158221583215842158521586215872158821589215902159121592215932159421595215962159721598215992160021601216022160321604216052160621607216082160921610216112161221613216142161521616216172161821619216202162121622216232162421625216262162721628216292163021631216322163321634216352163621637216382163921640216412164221643216442164521646216472164821649216502165121652216532165421655216562165721658216592166021661216622166321664216652166621667216682166921670216712167221673216742167521676216772167821679216802168121682216832168421685216862168721688216892169021691216922169321694216952169621697216982169921700217012170221703217042170521706217072170821709217102171121712217132171421715217162171721718217192172021721217222172321724217252172621727217282172921730217312173221733217342173521736217372173821739217402174121742217432174421745217462174721748217492175021751217522175321754217552175621757217582175921760217612176221763217642176521766217672176821769217702177121772217732177421775217762177721778217792178021781217822178321784217852178621787217882178921790217912179221793217942179521796217972179821799218002180121802218032180421805218062180721808218092181021811218122181321814218152181621817218182181921820218212
18222182321824218252182621827218282182921830218312183221833218342183521836218372183821839218402184121842218432184421845218462184721848218492185021851218522185321854218552185621857218582185921860218612186221863218642186521866218672186821869218702187121872218732187421875218762187721878218792188021881218822188321884218852188621887218882188921890218912189221893218942189521896218972189821899219002190121902219032190421905219062190721908219092191021911219122191321914219152191621917219182191921920219212192221923219242192521926219272192821929219302193121932219332193421935219362193721938219392194021941219422194321944219452194621947219482194921950219512195221953219542195521956219572195821959219602196121962219632196421965219662196721968219692197021971219722197321974219752197621977219782197921980219812198221983219842198521986219872198821989219902199121992219932199421995219962199721998219992200022001220022200322004220052200622007220082200922010220112201222013220142201522016220172201822019220202202122022220232202422025220262202722028220292203022031220322203322034220352203622037220382203922040220412204222043220442204522046220472204822049220502205122052220532205422055220562205722058220592206022061220622206322064220652206622067220682206922070220712207222073220742207522076220772207822079220802208122082220832208422085220862208722088220892209022091220922209322094220952209622097220982209922100221012210222103221042210522106221072210822109221102211122112221132211422115221162211722118221192212022121221222212322124221252212622127221282212922130221312213222133221342213522136221372213822139221402214122142221432214422145221462214722148221492215022151221522215322154221552215622157221582215922160221612216222163221642216522166221672216822169221702217122172221732217422175221762217722178221792218022181221822218322184221852218622187221882218922190221912219222193221942219522196221972219822199222002220122202222032220422205222062220722208222092221022211222122221322214222152221622217222182221922220222212
22222222322224222252222622227222282222922230222312223222233222342223522236222372223822239222402224122242222432224422245222462224722248222492225022251222522225322254222552225622257222582225922260222612226222263222642226522266222672226822269222702227122272222732227422275222762227722278222792228022281222822228322284222852228622287222882228922290222912229222293222942229522296222972229822299223002230122302223032230422305223062230722308223092231022311223122231322314223152231622317223182231922320223212232222323223242232522326223272232822329223302233122332223332233422335223362233722338223392234022341223422234322344223452234622347223482234922350223512235222353223542235522356223572235822359223602236122362223632236422365223662236722368223692237022371223722237322374223752237622377223782237922380223812238222383223842238522386223872238822389223902239122392223932239422395223962239722398223992240022401224022240322404224052240622407224082240922410224112241222413224142241522416224172241822419224202242122422224232242422425224262242722428224292243022431224322243322434224352243622437224382243922440224412244222443224442244522446224472244822449224502245122452224532245422455224562245722458224592246022461224622246322464224652246622467224682246922470224712247222473224742247522476224772247822479224802248122482224832248422485224862248722488224892249022491224922249322494224952249622497224982249922500225012250222503225042250522506225072250822509225102251122512225132251422515225162251722518225192252022521225222252322524225252252622527225282252922530225312253222533225342253522536225372253822539225402254122542225432254422545225462254722548225492255022551225522255322554225552255622557225582255922560225612256222563225642256522566225672256822569225702257122572225732257422575225762257722578225792258022581225822258322584225852258622587225882258922590225912259222593225942259522596225972259822599226002260122602226032260422605226062260722608226092261022611226122261322614226152261622617226182261922620226212
26222262322624226252262622627226282262922630226312263222633226342263522636226372263822639226402264122642226432264422645226462264722648226492265022651226522265322654226552265622657226582265922660226612266222663226642266522666226672266822669226702267122672226732267422675226762267722678226792268022681226822268322684226852268622687226882268922690226912269222693226942269522696226972269822699227002270122702227032270422705227062270722708227092271022711227122271322714227152271622717227182271922720227212272222723227242272522726227272272822729227302273122732227332273422735227362273722738227392274022741227422274322744227452274622747227482274922750227512275222753227542275522756227572275822759227602276122762227632276422765227662276722768227692277022771227722277322774227752277622777227782277922780227812278222783227842278522786227872278822789227902279122792227932279422795227962279722798227992280022801228022280322804228052280622807228082280922810228112281222813228142281522816228172281822819228202282122822228232282422825228262282722828228292283022831228322283322834228352283622837228382283922840228412284222843228442284522846228472284822849228502285122852228532285422855228562285722858228592286022861228622286322864228652286622867228682286922870228712287222873228742287522876228772287822879228802288122882228832288422885228862288722888228892289022891228922289322894228952289622897228982289922900229012290222903229042290522906229072290822909229102291122912229132291422915229162291722918229192292022921229222292322924229252292622927229282292922930229312293222933229342293522936229372293822939229402294122942229432294422945229462294722948229492295022951229522295322954229552295622957229582295922960229612296222963229642296522966229672296822969229702297122972229732297422975229762297722978229792298022981229822298322984229852298622987229882298922990229912299222993229942299522996229972299822999230002300123002230032300423005230062300723008230092301023011230122301323014230152301623017230182301923020230212
30222302323024230252302623027230282302923030230312303223033230342303523036230372303823039230402304123042230432304423045230462304723048230492305023051230522305323054230552305623057230582305923060230612306223063230642306523066230672306823069230702307123072230732307423075230762307723078230792308023081230822308323084230852308623087230882308923090230912309223093230942309523096230972309823099231002310123102231032310423105231062310723108231092311023111231122311323114231152311623117231182311923120231212312223123231242312523126231272312823129231302313123132231332313423135231362313723138231392314023141231422314323144231452314623147231482314923150231512315223153231542315523156231572315823159231602316123162231632316423165231662316723168231692317023171231722317323174231752317623177231782317923180231812318223183231842318523186231872318823189231902319123192231932319423195231962319723198231992320023201232022320323204232052320623207232082320923210232112321223213232142321523216232172321823219232202322123222232232322423225232262322723228232292323023231232322323323234232352323623237232382323923240232412324223243232442324523246232472324823249232502325123252232532325423255232562325723258232592326023261232622326323264232652326623267232682326923270232712327223273232742327523276232772327823279232802328123282232832328423285232862328723288232892329023291232922329323294232952329623297232982329923300233012330223303233042330523306233072330823309233102331123312233132331423315233162331723318233192332023321233222332323324233252332623327233282332923330233312333223333233342333523336233372333823339233402334123342233432334423345233462334723348233492335023351233522335323354233552335623357233582335923360233612336223363233642336523366233672336823369233702337123372233732337423375233762337723378233792338023381233822338323384233852338623387233882338923390233912339223393233942339523396233972339823399234002340123402234032340423405234062340723408234092341023411234122341323414234152341623417234182341923420234212
34222342323424234252342623427234282342923430234312343223433234342343523436234372343823439234402344123442234432344423445234462344723448234492345023451234522345323454234552345623457234582345923460234612346223463234642346523466234672346823469234702347123472234732347423475234762347723478234792348023481234822348323484234852348623487234882348923490234912349223493234942349523496234972349823499235002350123502235032350423505235062350723508235092351023511235122351323514235152351623517235182351923520235212352223523235242352523526235272352823529235302353123532235332353423535235362353723538235392354023541235422354323544235452354623547235482354923550235512355223553235542355523556235572355823559235602356123562235632356423565235662356723568235692357023571235722357323574235752357623577235782357923580235812358223583235842358523586235872358823589235902359123592235932359423595235962359723598235992360023601236022360323604236052360623607236082360923610236112361223613236142361523616236172361823619236202362123622236232362423625236262362723628236292363023631236322363323634236352363623637236382363923640236412364223643236442364523646236472364823649236502365123652236532365423655236562365723658236592366023661236622366323664236652366623667236682366923670236712367223673236742367523676236772367823679236802368123682236832368423685236862368723688236892369023691236922369323694236952369623697236982369923700237012370223703237042370523706237072370823709237102371123712237132371423715237162371723718237192372023721237222372323724237252372623727237282372923730237312373223733237342373523736237372373823739237402374123742237432374423745237462374723748237492375023751237522375323754237552375623757237582375923760237612376223763237642376523766237672376823769237702377123772237732377423775237762377723778237792378023781237822378323784237852378623787237882378923790237912379223793237942379523796237972379823799238002380123802238032380423805238062380723808238092381023811238122381323814238152381623817238182381923820238212
38222382323824238252382623827238282382923830238312383223833238342383523836238372383823839238402384123842238432384423845238462384723848238492385023851238522385323854238552385623857238582385923860238612386223863238642386523866238672386823869238702387123872238732387423875238762387723878238792388023881238822388323884238852388623887238882388923890238912389223893238942389523896238972389823899239002390123902239032390423905239062390723908239092391023911239122391323914239152391623917239182391923920239212392223923239242392523926239272392823929239302393123932239332393423935239362393723938239392394023941239422394323944239452394623947239482394923950239512395223953239542395523956239572395823959239602396123962239632396423965239662396723968239692397023971239722397323974239752397623977239782397923980239812398223983239842398523986239872398823989239902399123992239932399423995239962399723998239992400024001240022400324004240052400624007240082400924010240112401224013240142401524016240172401824019240202402124022240232402424025240262402724028240292403024031240322403324034240352403624037240382403924040240412404224043240442404524046240472404824049240502405124052240532405424055240562405724058240592406024061240622406324064240652406624067240682406924070240712407224073240742407524076240772407824079240802408124082240832408424085240862408724088240892409024091240922409324094240952409624097240982409924100241012410224103241042410524106241072410824109241102411124112241132411424115241162411724118241192412024121241222412324124241252412624127241282412924130241312413224133241342413524136241372413824139241402414124142241432414424145241462414724148241492415024151241522415324154241552415624157241582415924160241612416224163241642416524166241672416824169241702417124172241732417424175241762417724178241792418024181241822418324184241852418624187241882418924190241912419224193241942419524196241972419824199242002420124202242032420424205242062420724208242092421024211242122421324214242152421624217242182421924220242212
42222422324224242252422624227242282422924230242312423224233242342423524236242372423824239242402424124242242432424424245242462424724248242492425024251242522425324254242552425624257242582425924260242612426224263242642426524266242672426824269242702427124272242732427424275242762427724278242792428024281242822428324284242852428624287242882428924290242912429224293242942429524296242972429824299243002430124302243032430424305243062430724308243092431024311243122431324314243152431624317243182431924320243212432224323243242432524326243272432824329243302433124332243332433424335243362433724338243392434024341243422434324344243452434624347243482434924350243512435224353243542435524356243572435824359243602436124362243632436424365243662436724368243692437024371243722437324374243752437624377243782437924380243812438224383243842438524386243872438824389243902439124392243932439424395243962439724398243992440024401244022440324404244052440624407244082440924410244112441224413244142441524416244172441824419244202442124422244232442424425244262442724428244292443024431244322443324434244352443624437244382443924440244412444224443244442444524446244472444824449244502445124452244532445424455244562445724458244592446024461244622446324464244652446624467244682446924470244712447224473244742447524476244772447824479244802448124482244832448424485244862448724488244892449024491244922449324494244952449624497244982449924500245012450224503245042450524506245072450824509245102451124512245132451424515245162451724518245192452024521245222452324524245252452624527245282452924530245312453224533245342453524536245372453824539245402454124542245432454424545245462454724548245492455024551245522455324554245552455624557245582455924560245612456224563245642456524566245672456824569245702457124572245732457424575245762457724578245792458024581245822458324584245852458624587245882458924590245912459224593245942459524596245972459824599246002460124602246032460424605246062460724608246092461024611246122461324614246152461624617246182461924620246212
46222462324624246252462624627246282462924630246312463224633246342463524636246372463824639246402464124642246432464424645246462464724648246492465024651246522465324654246552465624657246582465924660246612466224663246642466524666246672466824669246702467124672246732467424675246762467724678246792468024681246822468324684246852468624687246882468924690246912469224693246942469524696246972469824699247002470124702247032470424705247062470724708247092471024711247122471324714247152471624717247182471924720247212472224723247242472524726247272472824729247302473124732247332473424735247362473724738247392474024741247422474324744247452474624747247482474924750247512475224753247542475524756247572475824759247602476124762247632476424765247662476724768247692477024771247722477324774247752477624777247782477924780247812478224783247842478524786247872478824789247902479124792247932479424795247962479724798247992480024801248022480324804248052480624807248082480924810248112481224813248142481524816248172481824819248202482124822248232482424825248262482724828248292483024831248322483324834248352483624837248382483924840248412484224843248442484524846248472484824849248502485124852248532485424855248562485724858248592486024861248622486324864248652486624867248682486924870248712487224873248742487524876248772487824879248802488124882248832488424885248862488724888248892489024891248922489324894248952489624897248982489924900249012490224903249042490524906249072490824909249102491124912249132491424915249162491724918249192492024921249222492324924249252492624927249282492924930249312493224933249342493524936249372493824939249402494124942249432494424945249462494724948249492495024951249522495324954249552495624957249582495924960249612496224963249642496524966249672496824969249702497124972249732497424975249762497724978249792498024981249822498324984249852498624987249882498924990249912499224993249942499524996249972499824999250002500125002250032500425005250062500725008250092501025011250122501325014250152501625017250182501925020250212
50222502325024250252502625027250282502925030250312503225033250342503525036250372503825039250402504125042250432504425045250462504725048250492505025051250522505325054250552505625057250582505925060250612506225063250642506525066250672506825069250702507125072250732507425075250762507725078250792508025081250822508325084250852508625087250882508925090250912509225093250942509525096250972509825099251002510125102251032510425105251062510725108251092511025111251122511325114251152511625117251182511925120251212512225123251242512525126251272512825129251302513125132251332513425135251362513725138251392514025141251422514325144251452514625147251482514925150251512515225153251542515525156251572515825159251602516125162251632516425165251662516725168251692517025171251722517325174251752517625177251782517925180251812518225183251842518525186251872518825189251902519125192251932519425195251962519725198251992520025201252022520325204252052520625207252082520925210252112521225213252142521525216252172521825219252202522125222252232522425225252262522725228252292523025231252322523325234252352523625237252382523925240252412524225243252442524525246252472524825249252502525125252252532525425255252562525725258252592526025261252622526325264252652526625267252682526925270252712527225273252742527525276252772527825279252802528125282252832528425285252862528725288252892529025291252922529325294252952529625297252982529925300253012530225303253042530525306253072530825309253102531125312253132531425315253162531725318253192532025321253222532325324253252532625327253282532925330253312533225333253342533525336253372533825339253402534125342253432534425345253462534725348253492535025351253522535325354253552535625357253582535925360253612536225363253642536525366253672536825369253702537125372253732537425375253762537725378253792538025381253822538325384253852538625387253882538925390253912539225393253942539525396253972539825399254002540125402254032540425405254062540725408254092541025411254122541325414254152541625417254182541925420254212
54222542325424254252542625427254282542925430254312543225433254342543525436254372543825439254402544125442254432544425445254462544725448254492545025451254522545325454254552545625457254582545925460254612546225463254642546525466254672546825469254702547125472254732547425475254762547725478254792548025481254822548325484254852548625487254882548925490254912549225493254942549525496254972549825499255002550125502255032550425505255062550725508255092551025511255122551325514255152551625517255182551925520255212552225523255242552525526255272552825529255302553125532255332553425535255362553725538255392554025541255422554325544255452554625547255482554925550255512555225553255542555525556255572555825559255602556125562255632556425565255662556725568255692557025571255722557325574255752557625577255782557925580255812558225583255842558525586255872558825589255902559125592255932559425595255962559725598255992560025601256022560325604256052560625607256082560925610256112561225613256142561525616256172561825619256202562125622256232562425625256262562725628256292563025631256322563325634256352563625637256382563925640256412564225643256442564525646256472564825649256502565125652256532565425655256562565725658256592566025661256622566325664256652566625667256682566925670256712567225673256742567525676256772567825679256802568125682256832568425685256862568725688256892569025691256922569325694256952569625697256982569925700257012570225703257042570525706257072570825709257102571125712257132571425715257162571725718257192572025721257222572325724257252572625727257282572925730257312573225733257342573525736257372573825739257402574125742257432574425745257462574725748257492575025751257522575325754257552575625757257582575925760257612576225763257642576525766257672576825769257702577125772257732577425775257762577725778257792578025781257822578325784257852578625787257882578925790257912579225793257942579525796257972579825799258002580125802258032580425805258062580725808258092581025811258122581325814258152581625817258182581925820258212
58222582325824258252582625827258282582925830258312583225833258342583525836258372583825839258402584125842258432584425845258462584725848258492585025851258522585325854258552585625857258582585925860258612586225863258642586525866258672586825869258702587125872258732587425875258762587725878258792588025881258822588325884258852588625887258882588925890258912589225893258942589525896258972589825899259002590125902259032590425905259062590725908259092591025911259122591325914259152591625917259182591925920259212592225923259242592525926259272592825929259302593125932259332593425935259362593725938259392594025941259422594325944259452594625947259482594925950259512595225953259542595525956259572595825959259602596125962259632596425965259662596725968259692597025971259722597325974259752597625977259782597925980259812598225983259842598525986259872598825989259902599125992259932599425995259962599725998259992600026001260022600326004260052600626007260082600926010260112601226013260142601526016260172601826019260202602126022260232602426025260262602726028260292603026031260322603326034260352603626037260382603926040260412604226043260442604526046260472604826049260502605126052260532605426055260562605726058260592606026061260622606326064260652606626067260682606926070260712607226073260742607526076260772607826079260802608126082260832608426085260862608726088260892609026091260922609326094260952609626097260982609926100261012610226103261042610526106261072610826109261102611126112261132611426115261162611726118261192612026121261222612326124261252612626127261282612926130261312613226133261342613526136261372613826139261402614126142261432614426145261462614726148261492615026151261522615326154261552615626157261582615926160261612616226163261642616526166261672616826169261702617126172261732617426175261762617726178261792618026181261822618326184261852618626187261882618926190261912619226193261942619526196261972619826199262002620126202262032620426205262062620726208262092621026211262122621326214262152621626217262182621926220262212
62222622326224262252622626227262282622926230262312623226233262342623526236262372623826239262402624126242262432624426245262462624726248262492625026251262522625326254262552625626257262582625926260262612626226263262642626526266262672626826269262702627126272262732627426275262762627726278262792628026281262822628326284262852628626287262882628926290262912629226293262942629526296262972629826299263002630126302263032630426305263062630726308263092631026311263122631326314263152631626317263182631926320263212632226323263242632526326263272632826329263302633126332263332633426335263362633726338263392634026341263422634326344263452634626347263482634926350263512635226353263542635526356263572635826359263602636126362263632636426365263662636726368263692637026371263722637326374263752637626377263782637926380263812638226383263842638526386263872638826389263902639126392263932639426395263962639726398263992640026401264022640326404264052640626407264082640926410264112641226413264142641526416264172641826419264202642126422264232642426425264262642726428264292643026431264322643326434264352643626437264382643926440264412644226443264442644526446264472644826449264502645126452264532645426455264562645726458264592646026461264622646326464264652646626467264682646926470264712647226473264742647526476264772647826479264802648126482264832648426485264862648726488264892649026491264922649326494264952649626497264982649926500265012650226503265042650526506265072650826509265102651126512265132651426515265162651726518265192652026521265222652326524265252652626527265282652926530265312653226533265342653526536265372653826539265402654126542265432654426545265462654726548265492655026551265522655326554265552655626557265582655926560265612656226563265642656526566265672656826569265702657126572265732657426575265762657726578265792658026581265822658326584265852658626587265882658926590265912659226593265942659526596265972659826599266002660126602266032660426605266062660726608266092661026611266122661326614266152661626617266182661926620266212
66222662326624266252662626627266282662926630266312663226633266342663526636266372663826639266402664126642266432664426645266462664726648266492665026651266522665326654266552665626657266582665926660266612666226663266642666526666266672666826669266702667126672266732667426675266762667726678266792668026681266822668326684266852668626687266882668926690266912669226693266942669526696266972669826699267002670126702267032670426705267062670726708267092671026711267122671326714267152671626717267182671926720267212672226723267242672526726267272672826729267302673126732267332673426735267362673726738267392674026741267422674326744267452674626747267482674926750267512675226753267542675526756267572675826759267602676126762267632676426765267662676726768267692677026771267722677326774267752677626777267782677926780267812678226783267842678526786267872678826789267902679126792267932679426795267962679726798267992680026801268022680326804268052680626807268082680926810268112681226813268142681526816268172681826819268202682126822268232682426825268262682726828268292683026831268322683326834268352683626837268382683926840268412684226843268442684526846268472684826849268502685126852268532685426855268562685726858268592686026861268622686326864268652686626867268682686926870268712687226873268742687526876268772687826879268802688126882268832688426885268862688726888268892689026891268922689326894268952689626897268982689926900269012690226903269042690526906269072690826909269102691126912269132691426915269162691726918269192692026921269222692326924269252692626927269282692926930269312693226933269342693526936269372693826939269402694126942269432694426945269462694726948269492695026951269522695326954269552695626957269582695926960269612696226963269642696526966269672696826969269702697126972269732697426975269762697726978269792698026981269822698326984269852698626987269882698926990269912699226993269942699526996269972699826999270002700127002270032700427005270062700727008270092701027011270122701327014270152701627017270182701927020270212
70222702327024270252702627027270282702927030270312703227033270342703527036270372703827039270402704127042270432704427045270462704727048270492705027051270522705327054270552705627057270582705927060270612706227063270642706527066270672706827069270702707127072270732707427075270762707727078270792708027081270822708327084270852708627087270882708927090270912709227093270942709527096270972709827099271002710127102271032710427105271062710727108271092711027111271122711327114271152711627117271182711927120271212712227123271242712527126271272712827129271302713127132271332713427135271362713727138271392714027141271422714327144271452714627147271482714927150271512715227153271542715527156271572715827159271602716127162271632716427165271662716727168271692717027171271722717327174271752717627177271782717927180271812718227183271842718527186271872718827189271902719127192271932719427195271962719727198271992720027201272022720327204272052720627207272082720927210272112721227213272142721527216272172721827219272202722127222272232722427225272262722727228272292723027231272322723327234272352723627237272382723927240272412724227243272442724527246272472724827249272502725127252272532725427255272562725727258272592726027261272622726327264272652726627267272682726927270272712727227273272742727527276272772727827279272802728127282272832728427285272862728727288272892729027291272922729327294272952729627297272982729927300273012730227303273042730527306273072730827309273102731127312273132731427315273162731727318273192732027321273222732327324273252732627327273282732927330273312733227333273342733527336273372733827339273402734127342273432734427345273462734727348273492735027351273522735327354273552735627357273582735927360273612736227363273642736527366273672736827369273702737127372273732737427375273762737727378273792738027381273822738327384273852738627387273882738927390273912739227393273942739527396273972739827399274002740127402274032740427405274062740727408274092741027411274122741327414274152741627417274182741927420274212
74222742327424274252742627427274282742927430274312743227433274342743527436274372743827439274402744127442274432744427445274462744727448274492745027451274522745327454274552745627457274582745927460274612746227463274642746527466274672746827469274702747127472274732747427475274762747727478274792748027481274822748327484274852748627487274882748927490274912749227493274942749527496274972749827499275002750127502275032750427505275062750727508275092751027511275122751327514275152751627517275182751927520275212752227523275242752527526275272752827529275302753127532275332753427535275362753727538275392754027541275422754327544275452754627547275482754927550275512755227553275542755527556275572755827559275602756127562275632756427565275662756727568275692757027571275722757327574275752757627577275782757927580275812758227583275842758527586275872758827589275902759127592275932759427595275962759727598275992760027601276022760327604276052760627607276082760927610276112761227613276142761527616276172761827619276202762127622276232762427625276262762727628276292763027631276322763327634276352763627637276382763927640276412764227643276442764527646276472764827649276502765127652276532765427655276562765727658276592766027661276622766327664276652766627667276682766927670276712767227673276742767527676276772767827679276802768127682276832768427685276862768727688276892769027691276922769327694276952769627697276982769927700277012770227703277042770527706277072770827709277102771127712277132771427715277162771727718277192772027721277222772327724277252772627727277282772927730277312773227733277342773527736277372773827739277402774127742277432774427745277462774727748277492775027751277522775327754277552775627757277582775927760277612776227763277642776527766277672776827769277702777127772277732777427775277762777727778277792778027781277822778327784277852778627787277882778927790277912779227793277942779527796277972779827799278002780127802278032780427805278062780727808278092781027811278122781327814278152781627817278182781927820278212
78222782327824278252782627827278282782927830278312783227833278342783527836278372783827839278402784127842278432784427845278462784727848278492785027851278522785327854278552785627857278582785927860278612786227863278642786527866278672786827869278702787127872278732787427875278762787727878278792788027881278822788327884278852788627887278882788927890278912789227893278942789527896278972789827899279002790127902279032790427905279062790727908279092791027911279122791327914279152791627917279182791927920279212792227923279242792527926279272792827929279302793127932279332793427935279362793727938279392794027941279422794327944279452794627947279482794927950279512795227953279542795527956279572795827959279602796127962279632796427965279662796727968279692797027971279722797327974279752797627977279782797927980279812798227983279842798527986279872798827989279902799127992279932799427995279962799727998279992800028001280022800328004280052800628007280082800928010280112801228013280142801528016280172801828019280202802128022280232802428025280262802728028280292803028031280322803328034280352803628037280382803928040280412804228043280442804528046280472804828049280502805128052280532805428055280562805728058280592806028061280622806328064280652806628067280682806928070280712807228073280742807528076280772807828079280802808128082280832808428085280862808728088280892809028091280922809328094280952809628097280982809928100281012810228103281042810528106281072810828109281102811128112281132811428115281162811728118281192812028121281222812328124281252812628127281282812928130281312813228133281342813528136281372813828139281402814128142281432814428145281462814728148281492815028151281522815328154281552815628157281582815928160281612816228163281642816528166281672816828169281702817128172281732817428175281762817728178281792818028181281822818328184281852818628187281882818928190281912819228193281942819528196281972819828199282002820128202282032820428205282062820728208282092821028211282122821328214282152821628217282182821928220282212
82222822328224282252822628227282282822928230282312823228233282342823528236282372823828239282402824128242282432824428245282462824728248282492825028251282522825328254282552825628257282582825928260282612826228263282642826528266282672826828269282702827128272282732827428275282762827728278282792828028281282822828328284282852828628287282882828928290282912829228293282942829528296282972829828299283002830128302283032830428305283062830728308283092831028311283122831328314283152831628317283182831928320283212832228323283242832528326283272832828329283302833128332283332833428335283362833728338283392834028341283422834328344283452834628347283482834928350283512835228353283542835528356283572835828359283602836128362283632836428365283662836728368283692837028371283722837328374283752837628377283782837928380283812838228383283842838528386283872838828389283902839128392283932839428395283962839728398283992840028401284022840328404284052840628407284082840928410284112841228413284142841528416284172841828419284202842128422284232842428425284262842728428284292843028431284322843328434284352843628437284382843928440284412844228443284442844528446284472844828449284502845128452284532845428455284562845728458284592846028461284622846328464284652846628467284682846928470284712847228473284742847528476284772847828479284802848128482284832848428485284862848728488284892849028491284922849328494284952849628497284982849928500285012850228503285042850528506285072850828509285102851128512285132851428515285162851728518285192852028521285222852328524285252852628527285282852928530285312853228533285342853528536285372853828539285402854128542285432854428545285462854728548285492855028551285522855328554285552855628557285582855928560285612856228563285642856528566285672856828569285702857128572285732857428575285762857728578285792858028581285822858328584285852858628587285882858928590285912859228593285942859528596285972859828599286002860128602286032860428605286062860728608286092861028611286122861328614286152861628617286182861928620286212
86222862328624286252862628627286282862928630286312863228633286342863528636286372863828639286402864128642286432864428645286462864728648286492865028651286522865328654286552865628657286582865928660286612866228663286642866528666286672866828669286702867128672286732867428675286762867728678286792868028681286822868328684286852868628687286882868928690286912869228693286942869528696286972869828699287002870128702287032870428705287062870728708287092871028711287122871328714287152871628717287182871928720287212872228723287242872528726287272872828729287302873128732287332873428735287362873728738287392874028741287422874328744287452874628747287482874928750287512875228753287542875528756287572875828759287602876128762287632876428765287662876728768287692877028771287722877328774287752877628777287782877928780287812878228783287842878528786287872878828789287902879128792287932879428795287962879728798287992880028801288022880328804288052880628807288082880928810288112881228813288142881528816288172881828819288202882128822288232882428825288262882728828288292883028831288322883328834288352883628837288382883928840288412884228843288442884528846288472884828849288502885128852288532885428855288562885728858288592886028861288622886328864288652886628867288682886928870288712887228873288742887528876288772887828879288802888128882288832888428885288862888728888288892889028891288922889328894288952889628897288982889928900289012890228903289042890528906289072890828909289102891128912289132891428915289162891728918289192892028921289222892328924289252892628927289282892928930289312893228933289342893528936289372893828939289402894128942289432894428945289462894728948289492895028951289522895328954289552895628957289582895928960289612896228963289642896528966289672896828969289702897128972289732897428975289762897728978289792898028981289822898328984289852898628987289882898928990289912899228993289942899528996289972899828999290002900129002290032900429005290062900729008290092901029011290122901329014290152901629017290182901929020290212
90222902329024290252902629027290282902929030290312903229033290342903529036290372903829039290402904129042290432904429045290462904729048290492905029051290522905329054290552905629057290582905929060290612906229063290642906529066290672906829069290702907129072290732907429075290762907729078290792908029081290822908329084290852908629087290882908929090290912909229093290942909529096290972909829099291002910129102291032910429105291062910729108291092911029111291122911329114291152911629117291182911929120291212912229123291242912529126291272912829129291302913129132291332913429135291362913729138291392914029141291422914329144291452914629147291482914929150291512915229153291542915529156291572915829159291602916129162291632916429165291662916729168291692917029171291722917329174291752917629177291782917929180291812918229183291842918529186291872918829189291902919129192291932919429195291962919729198291992920029201292022920329204292052920629207292082920929210292112921229213292142921529216292172921829219292202922129222292232922429225292262922729228292292923029231292322923329234292352923629237292382923929240292412924229243292442924529246292472924829249292502925129252292532925429255292562925729258292592926029261292622926329264292652926629267292682926929270292712927229273292742927529276292772927829279292802928129282292832928429285292862928729288292892929029291292922929329294292952929629297292982929929300293012930229303293042930529306293072930829309293102931129312293132931429315293162931729318293192932029321293222932329324293252932629327293282932929330293312933229333293342933529336293372933829339293402934129342293432934429345293462934729348293492935029351293522935329354293552935629357293582935929360293612936229363293642936529366293672936829369293702937129372293732937429375293762937729378293792938029381293822938329384293852938629387293882938929390293912939229393293942939529396293972939829399294002940129402294032940429405294062940729408294092941029411294122941329414294152941629417294182941929420294212
94222942329424294252942629427294282942929430294312943229433294342943529436294372943829439294402944129442294432944429445294462944729448294492945029451294522945329454294552945629457294582945929460294612946229463294642946529466294672946829469294702947129472294732947429475294762947729478294792948029481294822948329484294852948629487294882948929490294912949229493294942949529496294972949829499295002950129502295032950429505295062950729508295092951029511295122951329514295152951629517295182951929520295212952229523295242952529526295272952829529295302953129532295332953429535295362953729538295392954029541295422954329544295452954629547295482954929550295512955229553295542955529556295572955829559295602956129562295632956429565295662956729568295692957029571295722957329574295752957629577295782957929580295812958229583295842958529586295872958829589295902959129592295932959429595295962959729598295992960029601296022960329604296052960629607296082960929610296112961229613296142961529616296172961829619296202962129622296232962429625296262962729628296292963029631296322963329634296352963629637296382963929640296412964229643296442964529646296472964829649296502965129652296532965429655296562965729658296592966029661296622966329664296652966629667296682966929670296712967229673296742967529676296772967829679296802968129682296832968429685296862968729688296892969029691296922969329694296952969629697296982969929700297012970229703297042970529706297072970829709297102971129712297132971429715297162971729718297192972029721297222972329724297252972629727297282972929730297312973229733297342973529736297372973829739297402974129742297432974429745297462974729748297492975029751297522975329754297552975629757297582975929760297612976229763297642976529766297672976829769297702977129772297732977429775297762977729778297792978029781297822978329784297852978629787297882978929790297912979229793297942979529796297972979829799298002980129802298032980429805298062980729808298092981029811298122981329814298152981629817298182981929820298212
98222982329824298252982629827298282982929830298312983229833298342983529836298372983829839298402984129842298432984429845298462984729848298492985029851298522985329854298552985629857298582985929860298612986229863298642986529866298672986829869298702987129872298732987429875298762987729878298792988029881298822988329884298852988629887298882988929890298912989229893298942989529896298972989829899299002990129902299032990429905299062990729908299092991029911299122991329914299152991629917299182991929920299212992229923299242992529926299272992829929299302993129932299332993429935299362993729938299392994029941299422994329944299452994629947299482994929950299512995229953299542995529956299572995829959299602996129962299632996429965299662996729968299692997029971299722997329974299752997629977299782997929980299812998229983299842998529986299872998829989299902999129992299932999429995299962999729998299993000030001300023000330004300053000630007300083000930010300113001230013300143001530016300173001830019300203002130022300233002430025300263002730028300293003030031300323003330034300353003630037300383003930040300413004230043300443004530046300473004830049300503005130052300533005430055300563005730058300593006030061300623006330064300653006630067300683006930070300713007230073300743007530076300773007830079300803008130082300833008430085300863008730088300893009030091300923009330094300953009630097300983009930100301013010230103301043010530106301073010830109301103011130112301133011430115301163011730118301193012030121301223012330124301253012630127301283012930130301313013230133301343013530136301373013830139301403014130142301433014430145301463014730148301493015030151301523015330154301553015630157301583015930160301613016230163301643016530166301673016830169301703017130172301733017430175301763017730178301793018030181301823018330184301853018630187301883018930190301913019230193301943019530196301973019830199302003020130202302033020430205302063020730208302093021030211302123021330214302153021630217302183021930220302213
02223022330224302253022630227302283022930230302313023230233302343023530236302373023830239302403024130242302433024430245302463024730248302493025030251302523025330254302553025630257302583025930260302613026230263302643026530266302673026830269302703027130272302733027430275302763027730278302793028030281302823028330284302853028630287302883028930290302913029230293302943029530296302973029830299303003030130302303033030430305303063030730308303093031030311303123031330314303153031630317303183031930320303213032230323303243032530326303273032830329303303033130332303333033430335303363033730338303393034030341303423034330344303453034630347303483034930350303513035230353303543035530356303573035830359303603036130362303633036430365303663036730368303693037030371303723037330374303753037630377303783037930380303813038230383303843038530386303873038830389303903039130392303933039430395303963039730398303993040030401304023040330404304053040630407304083040930410304113041230413304143041530416304173041830419304203042130422304233042430425304263042730428304293043030431304323043330434304353043630437304383043930440304413044230443304443044530446304473044830449304503045130452304533045430455304563045730458304593046030461304623046330464304653046630467304683046930470304713047230473304743047530476304773047830479304803048130482304833048430485304863048730488304893049030491304923049330494304953049630497304983049930500305013050230503305043050530506305073050830509305103051130512305133051430515305163051730518305193052030521305223052330524305253052630527305283052930530305313053230533305343053530536305373053830539305403054130542305433054430545305463054730548305493055030551305523055330554305553055630557305583055930560305613056230563305643056530566305673056830569305703057130572305733057430575305763057730578305793058030581305823058330584305853058630587305883058930590305913059230593305943059530596305973059830599306003060130602306033060430605306063060730608306093061030611306123061330614306153061630617306183061930620306213
06223062330624306253062630627306283062930630306313063230633306343063530636306373063830639306403064130642306433064430645306463064730648306493065030651306523065330654306553065630657306583065930660306613066230663306643066530666306673066830669306703067130672306733067430675306763067730678306793068030681306823068330684306853068630687306883068930690306913069230693306943069530696306973069830699307003070130702307033070430705307063070730708307093071030711307123071330714307153071630717307183071930720307213072230723307243072530726307273072830729307303073130732307333073430735307363073730738307393074030741307423074330744307453074630747307483074930750307513075230753307543075530756307573075830759307603076130762307633076430765307663076730768307693077030771307723077330774307753077630777307783077930780307813078230783307843078530786307873078830789307903079130792307933079430795307963079730798307993080030801308023080330804308053080630807308083080930810308113081230813308143081530816308173081830819308203082130822308233082430825308263082730828308293083030831308323083330834308353083630837308383083930840308413084230843308443084530846308473084830849308503085130852308533085430855308563085730858308593086030861308623086330864308653086630867308683086930870308713087230873308743087530876308773087830879308803088130882308833088430885308863088730888308893089030891308923089330894308953089630897308983089930900309013090230903309043090530906309073090830909309103091130912309133091430915309163091730918309193092030921309223092330924309253092630927309283092930930309313093230933309343093530936309373093830939309403094130942309433094430945309463094730948309493095030951309523095330954309553095630957309583095930960309613096230963309643096530966309673096830969309703097130972309733097430975309763097730978309793098030981309823098330984309853098630987309883098930990309913099230993309943099530996309973099830999310003100131002310033100431005310063100731008310093101031011310123101331014310153101631017310183101931020310213
10223102331024310253102631027310283102931030310313103231033310343103531036310373103831039310403104131042310433104431045310463104731048310493105031051310523105331054310553105631057310583105931060310613106231063310643106531066310673106831069310703107131072310733107431075310763107731078310793108031081310823108331084310853108631087310883108931090310913109231093310943109531096310973109831099311003110131102311033110431105311063110731108311093111031111311123111331114311153111631117311183111931120311213112231123311243112531126311273112831129311303113131132311333113431135311363113731138311393114031141311423114331144311453114631147311483114931150311513115231153311543115531156311573115831159311603116131162311633116431165311663116731168311693117031171311723117331174311753117631177311783117931180311813118231183311843118531186311873118831189311903119131192311933119431195311963119731198311993120031201312023120331204312053120631207312083120931210312113121231213312143121531216312173121831219312203122131222312233122431225312263122731228312293123031231312323123331234312353123631237312383123931240312413124231243312443124531246312473124831249312503125131252312533125431255312563125731258312593126031261312623126331264312653126631267312683126931270312713127231273312743127531276312773127831279312803128131282312833128431285312863128731288312893129031291312923129331294312953129631297312983129931300313013130231303313043130531306313073130831309313103131131312313133131431315313163131731318313193132031321313223132331324313253132631327313283132931330313313133231333313343133531336313373133831339313403134131342313433134431345313463134731348313493135031351313523135331354313553135631357313583135931360313613136231363313643136531366313673136831369313703137131372313733137431375313763137731378313793138031381313823138331384313853138631387313883138931390313913139231393313943139531396313973139831399314003140131402314033140431405314063140731408314093141031411314123141331414314153141631417314183141931420314213
14223142331424314253142631427314283142931430314313143231433314343143531436314373143831439314403144131442314433144431445314463144731448314493145031451314523145331454314553145631457314583145931460314613146231463314643146531466314673146831469314703147131472314733147431475314763147731478314793148031481314823148331484314853148631487314883148931490314913149231493314943149531496314973149831499315003150131502315033150431505315063150731508315093151031511315123151331514315153151631517315183151931520315213152231523315243152531526315273152831529315303153131532315333153431535315363153731538315393154031541315423154331544315453154631547315483154931550315513155231553315543155531556315573155831559315603156131562315633156431565315663156731568315693157031571315723157331574315753157631577315783157931580315813158231583315843158531586315873158831589315903159131592315933159431595315963159731598315993160031601316023160331604316053160631607316083160931610316113161231613316143161531616316173161831619316203162131622316233162431625316263162731628316293163031631316323163331634316353163631637316383163931640316413164231643316443164531646316473164831649316503165131652316533165431655316563165731658316593166031661316623166331664316653166631667316683166931670316713167231673316743167531676316773167831679316803168131682316833168431685316863168731688316893169031691316923169331694316953169631697316983169931700317013170231703317043170531706317073170831709317103171131712317133171431715317163171731718317193172031721317223172331724317253172631727317283172931730317313173231733317343173531736317373173831739317403174131742317433174431745317463174731748317493175031751317523175331754317553175631757317583175931760317613176231763317643176531766317673176831769317703177131772317733177431775317763177731778317793178031781317823178331784317853178631787317883178931790317913179231793317943179531796317973179831799318003180131802318033180431805318063180731808318093181031811318123181331814318153181631817318183181931820318213
18223182331824318253182631827318283182931830318313183231833318343183531836318373183831839318403184131842318433184431845318463184731848318493185031851318523185331854318553185631857318583185931860318613186231863318643186531866318673186831869318703187131872318733187431875318763187731878318793188031881318823188331884318853188631887318883188931890318913189231893318943189531896318973189831899319003190131902319033190431905319063190731908319093191031911319123191331914319153191631917319183191931920319213192231923319243192531926319273192831929319303193131932319333193431935319363193731938319393194031941319423194331944319453194631947319483194931950319513195231953319543195531956319573195831959319603196131962319633196431965319663196731968319693197031971319723197331974319753197631977319783197931980319813198231983319843198531986319873198831989319903199131992319933199431995319963199731998319993200032001320023200332004320053200632007320083200932010320113201232013320143201532016320173201832019320203202132022320233202432025320263202732028320293203032031320323203332034320353203632037320383203932040320413204232043320443204532046320473204832049320503205132052320533205432055320563205732058320593206032061320623206332064320653206632067320683206932070320713207232073320743207532076320773207832079320803208132082320833208432085320863208732088320893209032091320923209332094320953209632097320983209932100321013210232103321043210532106321073210832109321103211132112321133211432115321163211732118321193212032121321223212332124321253212632127321283212932130321313213232133321343213532136321373213832139321403214132142321433214432145321463214732148321493215032151321523215332154321553215632157321583215932160321613216232163321643216532166321673216832169321703217132172321733217432175321763217732178321793218032181321823218332184321853218632187321883218932190321913219232193321943219532196321973219832199322003220132202322033220432205322063220732208322093221032211322123221332214322153221632217322183221932220322213
22223222332224322253222632227322283222932230322313223232233322343223532236322373223832239322403224132242322433224432245322463224732248322493225032251322523225332254322553225632257322583225932260322613226232263322643226532266322673226832269322703227132272322733227432275322763227732278322793228032281322823228332284322853228632287322883228932290322913229232293322943229532296322973229832299323003230132302323033230432305323063230732308323093231032311323123231332314323153231632317323183231932320323213232232323323243232532326323273232832329323303233132332323333233432335323363233732338323393234032341323423234332344323453234632347323483234932350323513235232353323543235532356323573235832359323603236132362323633236432365323663236732368323693237032371323723237332374323753237632377323783237932380323813238232383323843238532386323873238832389323903239132392323933239432395323963239732398323993240032401324023240332404324053240632407324083240932410324113241232413324143241532416324173241832419324203242132422324233242432425324263242732428324293243032431324323243332434324353243632437324383243932440324413244232443324443244532446324473244832449324503245132452324533245432455324563245732458324593246032461324623246332464324653246632467324683246932470324713247232473324743247532476324773247832479324803248132482324833248432485324863248732488324893249032491324923249332494324953249632497324983249932500325013250232503325043250532506325073250832509325103251132512325133251432515325163251732518325193252032521325223252332524325253252632527325283252932530325313253232533325343253532536325373253832539325403254132542325433254432545325463254732548325493255032551325523255332554325553255632557325583255932560325613256232563325643256532566325673256832569325703257132572325733257432575325763257732578325793258032581325823258332584325853258632587325883258932590325913259232593325943259532596325973259832599326003260132602326033260432605326063260732608326093261032611326123261332614326153261632617326183261932620326213
26223262332624326253262632627326283262932630326313263232633326343263532636326373263832639326403264132642326433264432645326463264732648326493265032651326523265332654326553265632657326583265932660326613266232663326643266532666326673266832669326703267132672326733267432675326763267732678326793268032681326823268332684326853268632687326883268932690326913269232693326943269532696326973269832699327003270132702327033270432705327063270732708327093271032711327123271332714327153271632717327183271932720327213272232723327243272532726327273272832729327303273132732327333273432735327363273732738327393274032741327423274332744327453274632747327483274932750327513275232753327543275532756327573275832759327603276132762327633276432765327663276732768327693277032771327723277332774327753277632777327783277932780327813278232783327843278532786327873278832789327903279132792327933279432795327963279732798327993280032801328023280332804328053280632807328083280932810328113281232813328143281532816328173281832819328203282132822328233282432825328263282732828328293283032831328323283332834328353283632837328383283932840328413284232843328443284532846328473284832849328503285132852328533285432855328563285732858328593286032861328623286332864328653286632867328683286932870328713287232873328743287532876328773287832879328803288132882328833288432885328863288732888328893289032891328923289332894328953289632897328983289932900329013290232903329043290532906329073290832909329103291132912329133291432915329163291732918329193292032921329223292332924329253292632927329283292932930329313293232933329343293532936329373293832939329403294132942329433294432945329463294732948329493295032951329523295332954329553295632957329583295932960329613296232963329643296532966329673296832969329703297132972329733297432975329763297732978329793298032981329823298332984329853298632987329883298932990329913299232993329943299532996329973299832999330003300133002330033300433005330063300733008330093301033011330123301333014330153301633017330183301933020330213
30223302333024330253302633027330283302933030330313303233033330343303533036330373303833039330403304133042330433304433045330463304733048330493305033051330523305333054330553305633057330583305933060330613306233063330643306533066330673306833069330703307133072330733307433075330763307733078330793308033081330823308333084330853308633087330883308933090330913309233093330943309533096330973309833099331003310133102331033310433105331063310733108331093311033111331123311333114331153311633117331183311933120331213312233123331243312533126331273312833129331303313133132331333313433135331363313733138331393314033141331423314333144331453314633147331483314933150331513315233153331543315533156331573315833159331603316133162331633316433165331663316733168331693317033171331723317333174331753317633177331783317933180331813318233183331843318533186331873318833189331903319133192331933319433195331963319733198331993320033201332023320333204332053320633207332083320933210332113321233213332143321533216332173321833219332203322133222332233322433225332263322733228332293323033231332323323333234332353323633237332383323933240332413324233243332443324533246332473324833249332503325133252332533325433255332563325733258332593326033261332623326333264332653326633267332683326933270332713327233273332743327533276332773327833279332803328133282332833328433285332863328733288332893329033291332923329333294332953329633297332983329933300333013330233303333043330533306333073330833309333103331133312333133331433315333163331733318333193332033321333223332333324333253332633327333283332933330333313333233333333343333533336333373333833339333403334133342333433334433345333463334733348333493335033351333523335333354333553335633357333583335933360333613336233363333643336533366333673336833369333703337133372333733337433375333763337733378333793338033381333823338333384333853338633387333883338933390333913339233393333943339533396333973339833399334003340133402334033340433405334063340733408334093341033411334123341333414334153341633417334183341933420334213
34223342333424334253342633427334283342933430334313343233433334343343533436334373343833439334403344133442334433344433445334463344733448334493345033451334523345333454334553345633457334583345933460334613346233463334643346533466334673346833469334703347133472334733347433475334763347733478334793348033481334823348333484334853348633487334883348933490334913349233493334943349533496334973349833499335003350133502335033350433505335063350733508335093351033511335123351333514335153351633517335183351933520335213352233523335243352533526335273352833529335303353133532335333353433535335363353733538335393354033541335423354333544335453354633547335483354933550335513355233553335543355533556335573355833559335603356133562335633356433565335663356733568335693357033571335723357333574335753357633577335783357933580335813358233583335843358533586335873358833589335903359133592335933359433595335963359733598335993360033601336023360333604336053360633607336083360933610336113361233613336143361533616336173361833619336203362133622336233362433625336263362733628336293363033631336323363333634336353363633637336383363933640336413364233643336443364533646336473364833649336503365133652336533365433655
  1. From: Matthias Schiffer <mschiffer@universe-factory.net>
  2. Date: Tue, 10 Mar 2015 12:40:53 +0100
  3. Subject: mac80211: update ath10k to compat-wireless-2015-03-05
  4. Taken from http://openwrt.reigndropsfall.net/
  5. diff --git a/package/kernel/mac80211/patches/917-mac80211-rx-reordering.patch b/package/kernel/mac80211/patches/917-mac80211-rx-reordering.patch
  6. new file mode 100644
  7. index 0000000..1d0c559
  8. --- /dev/null
  9. +++ b/package/kernel/mac80211/patches/917-mac80211-rx-reordering.patch
  10. @@ -0,0 +1,271 @@
  11. +commit 08cf42e843f9a7e253502011c81677f61f7e5c42
  12. +Author: Michal Kazior <michal.kazior@tieto.com>
  13. +Date: Wed Jul 16 12:12:15 2014 +0200
  14. +
  15. + mac80211: add support for Rx reordering offloading
  16. +
  17. + Some drivers may be performing most of Tx/Rx
  18. + aggregation on their own (e.g. in firmware)
  19. + including AddBa/DelBa negotiations but may
  20. + otherwise require Rx reordering assistance.
  21. +
  22. + The patch exports 2 new functions for establishing
  23. + Rx aggregation sessions in assumption device
  24. + driver has taken care of the necessary
  25. + negotiations.
  26. +
  27. + Signed-off-by: Michal Kazior <michal.kazior@tieto.com>
  28. + [fix endian bug]
  29. + Signed-off-by: Johannes Berg <johannes.berg@intel.com>
  30. +
  31. +--- a/include/net/mac80211.h
  32. ++++ b/include/net/mac80211.h
  33. +@@ -4481,6 +4481,40 @@ void ieee80211_stop_rx_ba_session(struct
  34. + */
  35. + void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn);
  36. +
  37. ++/**
  38. ++ * ieee80211_start_rx_ba_session_offl - start a Rx BA session
  39. ++ *
  40. ++ * Some device drivers may offload part of the Rx aggregation flow including
  41. ++ * AddBa/DelBa negotiation but may otherwise be incapable of full Rx
  42. ++ * reordering.
  43. ++ *
  44. ++ * Create structures responsible for reordering so device drivers may call here
  45. ++ * when they complete AddBa negotiation.
  46. ++ *
  47. ++ * @vif: &struct ieee80211_vif pointer from the add_interface callback
  48. ++ * @addr: station mac address
  49. ++ * @tid: the rx tid
  50. ++ */
  51. ++void ieee80211_start_rx_ba_session_offl(struct ieee80211_vif *vif,
  52. ++ const u8 *addr, u16 tid);
  53. ++
  54. ++/**
  55. ++ * ieee80211_stop_rx_ba_session_offl - stop a Rx BA session
  56. ++ *
  57. ++ * Some device drivers may offload part of the Rx aggregation flow including
  58. ++ * AddBa/DelBa negotiation but may otherwise be incapable of full Rx
  59. ++ * reordering.
  60. ++ *
  61. ++ * Destroy structures responsible for reordering so device drivers may call here
  62. ++ * when they complete DelBa negotiation.
  63. ++ *
  64. ++ * @vif: &struct ieee80211_vif pointer from the add_interface callback
  65. ++ * @addr: station mac address
  66. ++ * @tid: the rx tid
  67. ++ */
  68. ++void ieee80211_stop_rx_ba_session_offl(struct ieee80211_vif *vif,
  69. ++ const u8 *addr, u16 tid);
  70. ++
  71. + /* Rate control API */
  72. +
  73. + /**
  74. +--- a/net/mac80211/agg-rx.c
  75. ++++ b/net/mac80211/agg-rx.c
  76. +@@ -224,28 +224,15 @@ static void ieee80211_send_addba_resp(st
  77. + ieee80211_tx_skb(sdata, skb);
  78. + }
  79. +
  80. +-void ieee80211_process_addba_request(struct ieee80211_local *local,
  81. +- struct sta_info *sta,
  82. +- struct ieee80211_mgmt *mgmt,
  83. +- size_t len)
  84. ++void __ieee80211_start_rx_ba_session(struct sta_info *sta,
  85. ++ u8 dialog_token, u16 timeout,
  86. ++ u16 start_seq_num, u16 ba_policy, u16 tid,
  87. ++ u16 buf_size, bool tx)
  88. + {
  89. ++ struct ieee80211_local *local = sta->sdata->local;
  90. + struct tid_ampdu_rx *tid_agg_rx;
  91. +- u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num, status;
  92. +- u8 dialog_token;
  93. + int ret = -EOPNOTSUPP;
  94. +-
  95. +- /* extract session parameters from addba request frame */
  96. +- dialog_token = mgmt->u.action.u.addba_req.dialog_token;
  97. +- timeout = le16_to_cpu(mgmt->u.action.u.addba_req.timeout);
  98. +- start_seq_num =
  99. +- le16_to_cpu(mgmt->u.action.u.addba_req.start_seq_num) >> 4;
  100. +-
  101. +- capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
  102. +- ba_policy = (capab & IEEE80211_ADDBA_PARAM_POLICY_MASK) >> 1;
  103. +- tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
  104. +- buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
  105. +-
  106. +- status = WLAN_STATUS_REQUEST_DECLINED;
  107. ++ u16 status = WLAN_STATUS_REQUEST_DECLINED;
  108. +
  109. + if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
  110. + ht_dbg(sta->sdata,
  111. +@@ -264,7 +251,7 @@ void ieee80211_process_addba_request(str
  112. + status = WLAN_STATUS_INVALID_QOS_PARAM;
  113. + ht_dbg_ratelimited(sta->sdata,
  114. + "AddBA Req with bad params from %pM on tid %u. policy %d, buffer size %d\n",
  115. +- mgmt->sa, tid, ba_policy, buf_size);
  116. ++ sta->sta.addr, tid, ba_policy, buf_size);
  117. + goto end_no_lock;
  118. + }
  119. + /* determine default buffer size */
  120. +@@ -281,7 +268,7 @@ void ieee80211_process_addba_request(str
  121. + if (sta->ampdu_mlme.tid_rx[tid]) {
  122. + ht_dbg_ratelimited(sta->sdata,
  123. + "unexpected AddBA Req from %pM on tid %u\n",
  124. +- mgmt->sa, tid);
  125. ++ sta->sta.addr, tid);
  126. +
  127. + /* delete existing Rx BA session on the same tid */
  128. + ___ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT,
  129. +@@ -350,6 +337,74 @@ end:
  130. + mutex_unlock(&sta->ampdu_mlme.mtx);
  131. +
  132. + end_no_lock:
  133. +- ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, tid,
  134. +- dialog_token, status, 1, buf_size, timeout);
  135. ++ if (tx)
  136. ++ ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, tid,
  137. ++ dialog_token, status, 1, buf_size,
  138. ++ timeout);
  139. ++}
  140. ++
  141. ++void ieee80211_process_addba_request(struct ieee80211_local *local,
  142. ++ struct sta_info *sta,
  143. ++ struct ieee80211_mgmt *mgmt,
  144. ++ size_t len)
  145. ++{
  146. ++ u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num;
  147. ++ u8 dialog_token;
  148. ++
  149. ++ /* extract session parameters from addba request frame */
  150. ++ dialog_token = mgmt->u.action.u.addba_req.dialog_token;
  151. ++ timeout = le16_to_cpu(mgmt->u.action.u.addba_req.timeout);
  152. ++ start_seq_num =
  153. ++ le16_to_cpu(mgmt->u.action.u.addba_req.start_seq_num) >> 4;
  154. ++
  155. ++ capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
  156. ++ ba_policy = (capab & IEEE80211_ADDBA_PARAM_POLICY_MASK) >> 1;
  157. ++ tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
  158. ++ buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
  159. ++
  160. ++ __ieee80211_start_rx_ba_session(sta, dialog_token, timeout,
  161. ++ start_seq_num, ba_policy, tid,
  162. ++ buf_size, true);
  163. ++}
  164. ++
  165. ++void ieee80211_start_rx_ba_session_offl(struct ieee80211_vif *vif,
  166. ++ const u8 *addr, u16 tid)
  167. ++{
  168. ++ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
  169. ++ struct ieee80211_local *local = sdata->local;
  170. ++ struct ieee80211_rx_agg *rx_agg;
  171. ++ struct sk_buff *skb = dev_alloc_skb(0);
  172. ++
  173. ++ if (unlikely(!skb))
  174. ++ return;
  175. ++
  176. ++ rx_agg = (struct ieee80211_rx_agg *) &skb->cb;
  177. ++ memcpy(&rx_agg->addr, addr, ETH_ALEN);
  178. ++ rx_agg->tid = tid;
  179. ++
  180. ++ skb->pkt_type = IEEE80211_SDATA_QUEUE_RX_AGG_START;
  181. ++ skb_queue_tail(&sdata->skb_queue, skb);
  182. ++ ieee80211_queue_work(&local->hw, &sdata->work);
  183. ++}
  184. ++EXPORT_SYMBOL(ieee80211_start_rx_ba_session_offl);
  185. ++
  186. ++void ieee80211_stop_rx_ba_session_offl(struct ieee80211_vif *vif,
  187. ++ const u8 *addr, u16 tid)
  188. ++{
  189. ++ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
  190. ++ struct ieee80211_local *local = sdata->local;
  191. ++ struct ieee80211_rx_agg *rx_agg;
  192. ++ struct sk_buff *skb = dev_alloc_skb(0);
  193. ++
  194. ++ if (unlikely(!skb))
  195. ++ return;
  196. ++
  197. ++ rx_agg = (struct ieee80211_rx_agg *) &skb->cb;
  198. ++ memcpy(&rx_agg->addr, addr, ETH_ALEN);
  199. ++ rx_agg->tid = tid;
  200. ++
  201. ++ skb->pkt_type = IEEE80211_SDATA_QUEUE_RX_AGG_STOP;
  202. ++ skb_queue_tail(&sdata->skb_queue, skb);
  203. ++ ieee80211_queue_work(&local->hw, &sdata->work);
  204. + }
  205. ++EXPORT_SYMBOL(ieee80211_stop_rx_ba_session_offl);
  206. +--- a/net/mac80211/ieee80211_i.h
  207. ++++ b/net/mac80211/ieee80211_i.h
  208. +@@ -902,10 +902,17 @@ ieee80211_vif_get_shift(struct ieee80211
  209. + return shift;
  210. + }
  211. +
  212. ++struct ieee80211_rx_agg {
  213. ++ u8 addr[ETH_ALEN];
  214. ++ u16 tid;
  215. ++};
  216. ++
  217. + enum sdata_queue_type {
  218. + IEEE80211_SDATA_QUEUE_TYPE_FRAME = 0,
  219. + IEEE80211_SDATA_QUEUE_AGG_START = 1,
  220. + IEEE80211_SDATA_QUEUE_AGG_STOP = 2,
  221. ++ IEEE80211_SDATA_QUEUE_RX_AGG_START = 3,
  222. ++ IEEE80211_SDATA_QUEUE_RX_AGG_STOP = 4,
  223. + };
  224. +
  225. + enum {
  226. +@@ -1554,6 +1561,10 @@ void ___ieee80211_stop_rx_ba_session(str
  227. + u16 initiator, u16 reason, bool stop);
  228. + void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
  229. + u16 initiator, u16 reason, bool stop);
  230. ++void __ieee80211_start_rx_ba_session(struct sta_info *sta,
  231. ++ u8 dialog_token, u16 timeout,
  232. ++ u16 start_seq_num, u16 ba_policy, u16 tid,
  233. ++ u16 buf_size, bool tx);
  234. + void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
  235. + enum ieee80211_agg_stop_reason reason);
  236. + void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
  237. +--- a/net/mac80211/iface.c
  238. ++++ b/net/mac80211/iface.c
  239. +@@ -1154,6 +1154,7 @@ static void ieee80211_iface_work(struct
  240. + struct sk_buff *skb;
  241. + struct sta_info *sta;
  242. + struct ieee80211_ra_tid *ra_tid;
  243. ++ struct ieee80211_rx_agg *rx_agg;
  244. +
  245. + if (!ieee80211_sdata_running(sdata))
  246. + return;
  247. +@@ -1181,6 +1182,34 @@ static void ieee80211_iface_work(struct
  248. + ra_tid = (void *)&skb->cb;
  249. + ieee80211_stop_tx_ba_cb(&sdata->vif, ra_tid->ra,
  250. + ra_tid->tid);
  251. ++ } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_START) {
  252. ++ rx_agg = (void *)&skb->cb;
  253. ++ mutex_lock(&local->sta_mtx);
  254. ++ sta = sta_info_get_bss(sdata, rx_agg->addr);
  255. ++ if (sta) {
  256. ++ u16 last_seq;
  257. ++
  258. ++ last_seq = le16_to_cpu(
  259. ++ sta->last_seq_ctrl[rx_agg->tid]);
  260. ++
  261. ++ __ieee80211_start_rx_ba_session(sta,
  262. ++ 0, 0,
  263. ++ ieee80211_sn_inc(last_seq),
  264. ++ 1, rx_agg->tid,
  265. ++ IEEE80211_MAX_AMPDU_BUF,
  266. ++ false);
  267. ++ }
  268. ++ mutex_unlock(&local->sta_mtx);
  269. ++ } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_STOP) {
  270. ++ rx_agg = (void *)&skb->cb;
  271. ++ mutex_lock(&local->sta_mtx);
  272. ++ sta = sta_info_get_bss(sdata, rx_agg->addr);
  273. ++ if (sta)
  274. ++ __ieee80211_stop_rx_ba_session(sta,
  275. ++ rx_agg->tid,
  276. ++ WLAN_BACK_RECIPIENT, 0,
  277. ++ false);
  278. ++ mutex_unlock(&local->sta_mtx);
  279. + } else if (ieee80211_is_action(mgmt->frame_control) &&
  280. + mgmt->u.action.category == WLAN_CATEGORY_BACK) {
  281. + int len = skb->len;
  282. diff --git a/package/kernel/mac80211/patches/918-ath-spectral-debugfs.patch b/package/kernel/mac80211/patches/918-ath-spectral-debugfs.patch
  283. new file mode 100644
  284. index 0000000..d0c1bbd
  285. --- /dev/null
  286. +++ b/package/kernel/mac80211/patches/918-ath-spectral-debugfs.patch
  287. @@ -0,0 +1,192 @@
  288. +--- a/drivers/net/wireless/ath/ath9k/spectral.h
  289. ++++ b/drivers/net/wireless/ath/ath9k/spectral.h
  290. +@@ -17,6 +17,8 @@
  291. + #ifndef SPECTRAL_H
  292. + #define SPECTRAL_H
  293. +
  294. ++#include "../spectral_common.h"
  295. ++
  296. + /* enum spectral_mode:
  297. + *
  298. + * @SPECTRAL_DISABLED: spectral mode is disabled
  299. +@@ -54,8 +56,6 @@ struct ath_ht20_mag_info {
  300. + u8 max_exp;
  301. + } __packed;
  302. +
  303. +-#define SPECTRAL_HT20_NUM_BINS 56
  304. +-
  305. + /* WARNING: don't actually use this struct! MAC may vary the amount of
  306. + * data by -1/+2. This struct is for reference only.
  307. + */
  308. +@@ -83,8 +83,6 @@ struct ath_ht20_40_mag_info {
  309. + u8 max_exp;
  310. + } __packed;
  311. +
  312. +-#define SPECTRAL_HT20_40_NUM_BINS 128
  313. +-
  314. + /* WARNING: don't actually use this struct! MAC may vary the amount of
  315. + * data. This struct is for reference only.
  316. + */
  317. +@@ -125,71 +123,6 @@ static inline u8 spectral_bitmap_weight(
  318. + return bins[0] & 0x3f;
  319. + }
  320. +
  321. +-/* FFT sample format given to userspace via debugfs.
  322. +- *
  323. +- * Please keep the type/length at the front position and change
  324. +- * other fields after adding another sample type
  325. +- *
  326. +- * TODO: this might need rework when switching to nl80211-based
  327. +- * interface.
  328. +- */
  329. +-enum ath_fft_sample_type {
  330. +- ATH_FFT_SAMPLE_HT20 = 1,
  331. +- ATH_FFT_SAMPLE_HT20_40,
  332. +-};
  333. +-
  334. +-struct fft_sample_tlv {
  335. +- u8 type; /* see ath_fft_sample */
  336. +- __be16 length;
  337. +- /* type dependent data follows */
  338. +-} __packed;
  339. +-
  340. +-struct fft_sample_ht20 {
  341. +- struct fft_sample_tlv tlv;
  342. +-
  343. +- u8 max_exp;
  344. +-
  345. +- __be16 freq;
  346. +- s8 rssi;
  347. +- s8 noise;
  348. +-
  349. +- __be16 max_magnitude;
  350. +- u8 max_index;
  351. +- u8 bitmap_weight;
  352. +-
  353. +- __be64 tsf;
  354. +-
  355. +- u8 data[SPECTRAL_HT20_NUM_BINS];
  356. +-} __packed;
  357. +-
  358. +-struct fft_sample_ht20_40 {
  359. +- struct fft_sample_tlv tlv;
  360. +-
  361. +- u8 channel_type;
  362. +- __be16 freq;
  363. +-
  364. +- s8 lower_rssi;
  365. +- s8 upper_rssi;
  366. +-
  367. +- __be64 tsf;
  368. +-
  369. +- s8 lower_noise;
  370. +- s8 upper_noise;
  371. +-
  372. +- __be16 lower_max_magnitude;
  373. +- __be16 upper_max_magnitude;
  374. +-
  375. +- u8 lower_max_index;
  376. +- u8 upper_max_index;
  377. +-
  378. +- u8 lower_bitmap_weight;
  379. +- u8 upper_bitmap_weight;
  380. +-
  381. +- u8 max_exp;
  382. +-
  383. +- u8 data[SPECTRAL_HT20_40_NUM_BINS];
  384. +-} __packed;
  385. +-
  386. + void ath9k_spectral_init_debug(struct ath_softc *sc);
  387. + void ath9k_spectral_deinit_debug(struct ath_softc *sc);
  388. +
  389. +--- /dev/null
  390. ++++ b/drivers/net/wireless/ath/spectral_common.h
  391. +@@ -0,0 +1,88 @@
  392. ++/*
  393. ++ * Copyright (c) 2013 Qualcomm Atheros, Inc.
  394. ++ *
  395. ++ * Permission to use, copy, modify, and/or distribute this software for any
  396. ++ * purpose with or without fee is hereby granted, provided that the above
  397. ++ * copyright notice and this permission notice appear in all copies.
  398. ++ *
  399. ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  400. ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  401. ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  402. ++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  403. ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  404. ++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  405. ++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  406. ++ */
  407. ++
  408. ++#ifndef SPECTRAL_COMMON_H
  409. ++#define SPECTRAL_COMMON_H
  410. ++
  411. ++#define SPECTRAL_HT20_NUM_BINS 56
  412. ++#define SPECTRAL_HT20_40_NUM_BINS 128
  413. ++
  414. ++/* FFT sample format given to userspace via debugfs.
  415. ++ *
  416. ++ * Please keep the type/length at the front position and change
  417. ++ * other fields after adding another sample type
  418. ++ *
  419. ++ * TODO: this might need rework when switching to nl80211-based
  420. ++ * interface.
  421. ++ */
  422. ++enum ath_fft_sample_type {
  423. ++ ATH_FFT_SAMPLE_HT20 = 1,
  424. ++ ATH_FFT_SAMPLE_HT20_40,
  425. ++};
  426. ++
  427. ++struct fft_sample_tlv {
  428. ++ u8 type; /* see ath_fft_sample */
  429. ++ __be16 length;
  430. ++ /* type dependent data follows */
  431. ++} __packed;
  432. ++
  433. ++struct fft_sample_ht20 {
  434. ++ struct fft_sample_tlv tlv;
  435. ++
  436. ++ u8 max_exp;
  437. ++
  438. ++ __be16 freq;
  439. ++ s8 rssi;
  440. ++ s8 noise;
  441. ++
  442. ++ __be16 max_magnitude;
  443. ++ u8 max_index;
  444. ++ u8 bitmap_weight;
  445. ++
  446. ++ __be64 tsf;
  447. ++
  448. ++ u8 data[SPECTRAL_HT20_NUM_BINS];
  449. ++} __packed;
  450. ++
  451. ++struct fft_sample_ht20_40 {
  452. ++ struct fft_sample_tlv tlv;
  453. ++
  454. ++ u8 channel_type;
  455. ++ __be16 freq;
  456. ++
  457. ++ s8 lower_rssi;
  458. ++ s8 upper_rssi;
  459. ++
  460. ++ __be64 tsf;
  461. ++
  462. ++ s8 lower_noise;
  463. ++ s8 upper_noise;
  464. ++
  465. ++ __be16 lower_max_magnitude;
  466. ++ __be16 upper_max_magnitude;
  467. ++
  468. ++ u8 lower_max_index;
  469. ++ u8 upper_max_index;
  470. ++
  471. ++ u8 lower_bitmap_weight;
  472. ++ u8 upper_bitmap_weight;
  473. ++
  474. ++ u8 max_exp;
  475. ++
  476. ++ u8 data[SPECTRAL_HT20_40_NUM_BINS];
  477. ++} __packed;
  478. ++
  479. ++#endif /* SPECTRAL_COMMON_H */
  480. diff --git a/package/kernel/mac80211/patches/919-update-ath10k.patch b/package/kernel/mac80211/patches/919-update-ath10k.patch
  481. new file mode 100644
  482. index 0000000..45fccb8
  483. --- /dev/null
  484. +++ b/package/kernel/mac80211/patches/919-update-ath10k.patch
  485. @@ -0,0 +1,33023 @@
  486. +--- a/drivers/net/wireless/ath/ath10k/Kconfig
  487. ++++ b/drivers/net/wireless/ath/ath10k/Kconfig
  488. +@@ -26,13 +26,15 @@ config ATH10K_DEBUG
  489. +
  490. + config ATH10K_DEBUGFS
  491. + bool "Atheros ath10k debugfs support"
  492. +- depends on ATH10K
  493. ++ depends on ATH10K && DEBUG_FS
  494. ++ depends on RELAY
  495. + ---help---
  496. + Enabled debugfs support
  497. +
  498. + If unsure, say Y to make it easier to debug problems.
  499. +
  500. + config ATH10K_TRACING
  501. ++ depends on !KERNEL_3_4
  502. + bool "Atheros ath10k tracing support"
  503. + depends on ATH10K
  504. + depends on EVENT_TRACING
  505. +--- a/drivers/net/wireless/ath/ath10k/Makefile
  506. ++++ b/drivers/net/wireless/ath/ath10k/Makefile
  507. +@@ -8,9 +8,15 @@ ath10k_core-y += mac.o \
  508. + htt_tx.o \
  509. + txrx.o \
  510. + wmi.o \
  511. +- bmi.o
  512. ++ wmi-tlv.o \
  513. ++ bmi.o \
  514. ++ hw.o
  515. +
  516. ++ath10k_core-$(CPTCFG_ATH10K_DEBUGFS) += spectral.o
  517. ++ath10k_core-$(CPTCFG_NL80211_TESTMODE) += testmode.o
  518. + ath10k_core-$(CPTCFG_ATH10K_TRACING) += trace.o
  519. ++ath10k_core-$(CONFIG_THERMAL) += thermal.o
  520. ++ath10k_core-$(CPTCFG_MAC80211_DEBUGFS) += debugfs_sta.o
  521. +
  522. + obj-$(CPTCFG_ATH10K_PCI) += ath10k_pci.o
  523. + ath10k_pci-y += pci.o \
  524. +--- a/drivers/net/wireless/ath/ath10k/bmi.c
  525. ++++ b/drivers/net/wireless/ath/ath10k/bmi.c
  526. +@@ -22,7 +22,7 @@
  527. +
  528. + void ath10k_bmi_start(struct ath10k *ar)
  529. + {
  530. +- ath10k_dbg(ATH10K_DBG_BMI, "bmi start\n");
  531. ++ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n");
  532. +
  533. + ar->bmi.done_sent = false;
  534. + }
  535. +@@ -33,10 +33,10 @@ int ath10k_bmi_done(struct ath10k *ar)
  536. + u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
  537. + int ret;
  538. +
  539. +- ath10k_dbg(ATH10K_DBG_BMI, "bmi done\n");
  540. ++ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi done\n");
  541. +
  542. + if (ar->bmi.done_sent) {
  543. +- ath10k_dbg(ATH10K_DBG_BMI, "bmi skipped\n");
  544. ++ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi skipped\n");
  545. + return 0;
  546. + }
  547. +
  548. +@@ -45,7 +45,7 @@ int ath10k_bmi_done(struct ath10k *ar)
  549. +
  550. + ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
  551. + if (ret) {
  552. +- ath10k_warn("unable to write to the device: %d\n", ret);
  553. ++ ath10k_warn(ar, "unable to write to the device: %d\n", ret);
  554. + return ret;
  555. + }
  556. +
  557. +@@ -61,10 +61,10 @@ int ath10k_bmi_get_target_info(struct at
  558. + u32 resplen = sizeof(resp.get_target_info);
  559. + int ret;
  560. +
  561. +- ath10k_dbg(ATH10K_DBG_BMI, "bmi get target info\n");
  562. ++ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info\n");
  563. +
  564. + if (ar->bmi.done_sent) {
  565. +- ath10k_warn("BMI Get Target Info Command disallowed\n");
  566. ++ ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
  567. + return -EBUSY;
  568. + }
  569. +
  570. +@@ -72,12 +72,12 @@ int ath10k_bmi_get_target_info(struct at
  571. +
  572. + ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
  573. + if (ret) {
  574. +- ath10k_warn("unable to get target info from device\n");
  575. ++ ath10k_warn(ar, "unable to get target info from device\n");
  576. + return ret;
  577. + }
  578. +
  579. + if (resplen < sizeof(resp.get_target_info)) {
  580. +- ath10k_warn("invalid get_target_info response length (%d)\n",
  581. ++ ath10k_warn(ar, "invalid get_target_info response length (%d)\n",
  582. + resplen);
  583. + return -EIO;
  584. + }
  585. +@@ -97,11 +97,11 @@ int ath10k_bmi_read_memory(struct ath10k
  586. + u32 rxlen;
  587. + int ret;
  588. +
  589. +- ath10k_dbg(ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
  590. ++ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
  591. + address, length);
  592. +
  593. + if (ar->bmi.done_sent) {
  594. +- ath10k_warn("command disallowed\n");
  595. ++ ath10k_warn(ar, "command disallowed\n");
  596. + return -EBUSY;
  597. + }
  598. +
  599. +@@ -115,7 +115,7 @@ int ath10k_bmi_read_memory(struct ath10k
  600. + ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
  601. + &resp, &rxlen);
  602. + if (ret) {
  603. +- ath10k_warn("unable to read from the device (%d)\n",
  604. ++ ath10k_warn(ar, "unable to read from the device (%d)\n",
  605. + ret);
  606. + return ret;
  607. + }
  608. +@@ -137,11 +137,11 @@ int ath10k_bmi_write_memory(struct ath10
  609. + u32 txlen;
  610. + int ret;
  611. +
  612. +- ath10k_dbg(ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
  613. ++ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
  614. + address, length);
  615. +
  616. + if (ar->bmi.done_sent) {
  617. +- ath10k_warn("command disallowed\n");
  618. ++ ath10k_warn(ar, "command disallowed\n");
  619. + return -EBUSY;
  620. + }
  621. +
  622. +@@ -159,7 +159,7 @@ int ath10k_bmi_write_memory(struct ath10
  623. + ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
  624. + NULL, NULL);
  625. + if (ret) {
  626. +- ath10k_warn("unable to write to the device (%d)\n",
  627. ++ ath10k_warn(ar, "unable to write to the device (%d)\n",
  628. + ret);
  629. + return ret;
  630. + }
  631. +@@ -183,11 +183,11 @@ int ath10k_bmi_execute(struct ath10k *ar
  632. + u32 resplen = sizeof(resp.execute);
  633. + int ret;
  634. +
  635. +- ath10k_dbg(ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
  636. ++ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
  637. + address, param);
  638. +
  639. + if (ar->bmi.done_sent) {
  640. +- ath10k_warn("command disallowed\n");
  641. ++ ath10k_warn(ar, "command disallowed\n");
  642. + return -EBUSY;
  643. + }
  644. +
  645. +@@ -197,19 +197,19 @@ int ath10k_bmi_execute(struct ath10k *ar
  646. +
  647. + ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
  648. + if (ret) {
  649. +- ath10k_warn("unable to read from the device\n");
  650. ++ ath10k_warn(ar, "unable to read from the device\n");
  651. + return ret;
  652. + }
  653. +
  654. + if (resplen < sizeof(resp.execute)) {
  655. +- ath10k_warn("invalid execute response length (%d)\n",
  656. ++ ath10k_warn(ar, "invalid execute response length (%d)\n",
  657. + resplen);
  658. + return -EIO;
  659. + }
  660. +
  661. + *result = __le32_to_cpu(resp.execute.result);
  662. +
  663. +- ath10k_dbg(ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);
  664. ++ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);
  665. +
  666. + return 0;
  667. + }
  668. +@@ -221,11 +221,11 @@ int ath10k_bmi_lz_data(struct ath10k *ar
  669. + u32 txlen;
  670. + int ret;
  671. +
  672. +- ath10k_dbg(ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
  673. ++ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
  674. + buffer, length);
  675. +
  676. + if (ar->bmi.done_sent) {
  677. +- ath10k_warn("command disallowed\n");
  678. ++ ath10k_warn(ar, "command disallowed\n");
  679. + return -EBUSY;
  680. + }
  681. +
  682. +@@ -241,7 +241,7 @@ int ath10k_bmi_lz_data(struct ath10k *ar
  683. + ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
  684. + NULL, NULL);
  685. + if (ret) {
  686. +- ath10k_warn("unable to write to the device\n");
  687. ++ ath10k_warn(ar, "unable to write to the device\n");
  688. + return ret;
  689. + }
  690. +
  691. +@@ -258,11 +258,11 @@ int ath10k_bmi_lz_stream_start(struct at
  692. + u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
  693. + int ret;
  694. +
  695. +- ath10k_dbg(ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
  696. ++ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
  697. + address);
  698. +
  699. + if (ar->bmi.done_sent) {
  700. +- ath10k_warn("command disallowed\n");
  701. ++ ath10k_warn(ar, "command disallowed\n");
  702. + return -EBUSY;
  703. + }
  704. +
  705. +@@ -271,7 +271,7 @@ int ath10k_bmi_lz_stream_start(struct at
  706. +
  707. + ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
  708. + if (ret) {
  709. +- ath10k_warn("unable to Start LZ Stream to the device\n");
  710. ++ ath10k_warn(ar, "unable to Start LZ Stream to the device\n");
  711. + return ret;
  712. + }
  713. +
  714. +@@ -286,7 +286,7 @@ int ath10k_bmi_fast_download(struct ath1
  715. + u32 trailer_len = length - head_len;
  716. + int ret;
  717. +
  718. +- ath10k_dbg(ATH10K_DBG_BMI,
  719. ++ ath10k_dbg(ar, ATH10K_DBG_BMI,
  720. + "bmi fast download address 0x%x buffer 0x%p length %d\n",
  721. + address, buffer, length);
  722. +
  723. +--- a/drivers/net/wireless/ath/ath10k/bmi.h
  724. ++++ b/drivers/net/wireless/ath/ath10k/bmi.h
  725. +@@ -177,7 +177,6 @@ struct bmi_target_info {
  726. + u32 type;
  727. + };
  728. +
  729. +-
  730. + /* in msec */
  731. + #define BMI_COMMUNICATION_TIMEOUT_HZ (1*HZ)
  732. +
  733. +@@ -201,7 +200,8 @@ int ath10k_bmi_write_memory(struct ath10
  734. + \
  735. + addr = host_interest_item_address(HI_ITEM(item)); \
  736. + ret = ath10k_bmi_read_memory(ar, addr, (u8 *)&tmp, 4); \
  737. +- *val = __le32_to_cpu(tmp); \
  738. ++ if (!ret) \
  739. ++ *val = __le32_to_cpu(tmp); \
  740. + ret; \
  741. + })
  742. +
  743. +--- a/drivers/net/wireless/ath/ath10k/ce.c
  744. ++++ b/drivers/net/wireless/ath/ath10k/ce.c
  745. +@@ -260,7 +260,6 @@ static inline void ath10k_ce_engine_int_
  746. + ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask);
  747. + }
  748. +
  749. +-
  750. + /*
  751. + * Guts of ath10k_ce_send, used by both ath10k_ce_send and
  752. + * ath10k_ce_sendlist_send.
  753. +@@ -284,13 +283,9 @@ int ath10k_ce_send_nolock(struct ath10k_
  754. + int ret = 0;
  755. +
  756. + if (nbytes > ce_state->src_sz_max)
  757. +- ath10k_warn("%s: send more we can (nbytes: %d, max: %d)\n",
  758. ++ ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n",
  759. + __func__, nbytes, ce_state->src_sz_max);
  760. +
  761. +- ret = ath10k_pci_wake(ar);
  762. +- if (ret)
  763. +- return ret;
  764. +-
  765. + if (unlikely(CE_RING_DELTA(nentries_mask,
  766. + write_index, sw_index - 1) <= 0)) {
  767. + ret = -ENOSR;
  768. +@@ -325,10 +320,36 @@ int ath10k_ce_send_nolock(struct ath10k_
  769. +
  770. + src_ring->write_index = write_index;
  771. + exit:
  772. +- ath10k_pci_sleep(ar);
  773. + return ret;
  774. + }
  775. +
  776. ++void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
  777. ++{
  778. ++ struct ath10k *ar = pipe->ar;
  779. ++ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  780. ++ struct ath10k_ce_ring *src_ring = pipe->src_ring;
  781. ++ u32 ctrl_addr = pipe->ctrl_addr;
  782. ++
  783. ++ lockdep_assert_held(&ar_pci->ce_lock);
  784. ++
  785. ++ /*
  786. ++ * This function must be called only if there is an incomplete
  787. ++ * scatter-gather transfer (before index register is updated)
  788. ++ * that needs to be cleaned up.
  789. ++ */
  790. ++ if (WARN_ON_ONCE(src_ring->write_index == src_ring->sw_index))
  791. ++ return;
  792. ++
  793. ++ if (WARN_ON_ONCE(src_ring->write_index ==
  794. ++ ath10k_ce_src_ring_write_index_get(ar, ctrl_addr)))
  795. ++ return;
  796. ++
  797. ++ src_ring->write_index--;
  798. ++ src_ring->write_index &= src_ring->nentries_mask;
  799. ++
  800. ++ src_ring->per_transfer_context[src_ring->write_index] = NULL;
  801. ++}
  802. ++
  803. + int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
  804. + void *per_transfer_context,
  805. + u32 buffer,
  806. +@@ -363,49 +384,56 @@ int ath10k_ce_num_free_src_entries(struc
  807. + return delta;
  808. + }
  809. +
  810. +-int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
  811. +- void *per_recv_context,
  812. +- u32 buffer)
  813. ++int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
  814. + {
  815. +- struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
  816. +- u32 ctrl_addr = ce_state->ctrl_addr;
  817. +- struct ath10k *ar = ce_state->ar;
  818. ++ struct ath10k *ar = pipe->ar;
  819. + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  820. ++ struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
  821. + unsigned int nentries_mask = dest_ring->nentries_mask;
  822. +- unsigned int write_index;
  823. +- unsigned int sw_index;
  824. +- int ret;
  825. ++ unsigned int write_index = dest_ring->write_index;
  826. ++ unsigned int sw_index = dest_ring->sw_index;
  827. +
  828. +- spin_lock_bh(&ar_pci->ce_lock);
  829. +- write_index = dest_ring->write_index;
  830. +- sw_index = dest_ring->sw_index;
  831. ++ lockdep_assert_held(&ar_pci->ce_lock);
  832. +
  833. +- ret = ath10k_pci_wake(ar);
  834. +- if (ret)
  835. +- goto out;
  836. ++ return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
  837. ++}
  838. +
  839. +- if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
  840. +- struct ce_desc *base = dest_ring->base_addr_owner_space;
  841. +- struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);
  842. ++int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
  843. ++{
  844. ++ struct ath10k *ar = pipe->ar;
  845. ++ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  846. ++ struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
  847. ++ unsigned int nentries_mask = dest_ring->nentries_mask;
  848. ++ unsigned int write_index = dest_ring->write_index;
  849. ++ unsigned int sw_index = dest_ring->sw_index;
  850. ++ struct ce_desc *base = dest_ring->base_addr_owner_space;
  851. ++ struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);
  852. ++ u32 ctrl_addr = pipe->ctrl_addr;
  853. +
  854. +- /* Update destination descriptor */
  855. +- desc->addr = __cpu_to_le32(buffer);
  856. +- desc->nbytes = 0;
  857. ++ lockdep_assert_held(&ar_pci->ce_lock);
  858. +
  859. +- dest_ring->per_transfer_context[write_index] =
  860. +- per_recv_context;
  861. ++ if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
  862. ++ return -EIO;
  863. +
  864. +- /* Update Destination Ring Write Index */
  865. +- write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
  866. +- ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
  867. +- dest_ring->write_index = write_index;
  868. +- ret = 0;
  869. +- } else {
  870. +- ret = -EIO;
  871. +- }
  872. +- ath10k_pci_sleep(ar);
  873. ++ desc->addr = __cpu_to_le32(paddr);
  874. ++ desc->nbytes = 0;
  875. ++
  876. ++ dest_ring->per_transfer_context[write_index] = ctx;
  877. ++ write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
  878. ++ ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
  879. ++ dest_ring->write_index = write_index;
  880. ++
  881. ++ return 0;
  882. ++}
  883. +
  884. +-out:
  885. ++int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
  886. ++{
  887. ++ struct ath10k *ar = pipe->ar;
  888. ++ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  889. ++ int ret;
  890. ++
  891. ++ spin_lock_bh(&ar_pci->ce_lock);
  892. ++ ret = __ath10k_ce_rx_post_buf(pipe, ctx, paddr);
  893. + spin_unlock_bh(&ar_pci->ce_lock);
  894. +
  895. + return ret;
  896. +@@ -415,12 +443,12 @@ out:
  897. + * Guts of ath10k_ce_completed_recv_next.
  898. + * The caller takes responsibility for any necessary locking.
  899. + */
  900. +-static int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
  901. +- void **per_transfer_contextp,
  902. +- u32 *bufferp,
  903. +- unsigned int *nbytesp,
  904. +- unsigned int *transfer_idp,
  905. +- unsigned int *flagsp)
  906. ++int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
  907. ++ void **per_transfer_contextp,
  908. ++ u32 *bufferp,
  909. ++ unsigned int *nbytesp,
  910. ++ unsigned int *transfer_idp,
  911. ++ unsigned int *flagsp)
  912. + {
  913. + struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
  914. + unsigned int nentries_mask = dest_ring->nentries_mask;
  915. +@@ -530,6 +558,7 @@ int ath10k_ce_revoke_recv_next(struct at
  916. +
  917. + /* sanity */
  918. + dest_ring->per_transfer_context[sw_index] = NULL;
  919. ++ desc->nbytes = 0;
  920. +
  921. + /* Update sw_index */
  922. + sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
  923. +@@ -548,11 +577,11 @@ int ath10k_ce_revoke_recv_next(struct at
  924. + * Guts of ath10k_ce_completed_send_next.
  925. + * The caller takes responsibility for any necessary locking.
  926. + */
  927. +-static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
  928. +- void **per_transfer_contextp,
  929. +- u32 *bufferp,
  930. +- unsigned int *nbytesp,
  931. +- unsigned int *transfer_idp)
  932. ++int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
  933. ++ void **per_transfer_contextp,
  934. ++ u32 *bufferp,
  935. ++ unsigned int *nbytesp,
  936. ++ unsigned int *transfer_idp)
  937. + {
  938. + struct ath10k_ce_ring *src_ring = ce_state->src_ring;
  939. + u32 ctrl_addr = ce_state->ctrl_addr;
  940. +@@ -561,7 +590,6 @@ static int ath10k_ce_completed_send_next
  941. + unsigned int sw_index = src_ring->sw_index;
  942. + struct ce_desc *sdesc, *sbase;
  943. + unsigned int read_index;
  944. +- int ret;
  945. +
  946. + if (src_ring->hw_index == sw_index) {
  947. + /*
  948. +@@ -572,20 +600,17 @@ static int ath10k_ce_completed_send_next
  949. + * value of the HW index has become stale.
  950. + */
  951. +
  952. +- ret = ath10k_pci_wake(ar);
  953. +- if (ret)
  954. +- return ret;
  955. +-
  956. +- src_ring->hw_index =
  957. +- ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
  958. +- src_ring->hw_index &= nentries_mask;
  959. ++ read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
  960. ++ if (read_index == 0xffffffff)
  961. ++ return -ENODEV;
  962. +
  963. +- ath10k_pci_sleep(ar);
  964. ++ read_index &= nentries_mask;
  965. ++ src_ring->hw_index = read_index;
  966. + }
  967. +
  968. + read_index = src_ring->hw_index;
  969. +
  970. +- if ((read_index == sw_index) || (read_index == 0xffffffff))
  971. ++ if (read_index == sw_index)
  972. + return -EIO;
  973. +
  974. + sbase = src_ring->shadow_base;
  975. +@@ -701,11 +726,6 @@ void ath10k_ce_per_engine_service(struct
  976. + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  977. + struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
  978. + u32 ctrl_addr = ce_state->ctrl_addr;
  979. +- int ret;
  980. +-
  981. +- ret = ath10k_pci_wake(ar);
  982. +- if (ret)
  983. +- return;
  984. +
  985. + spin_lock_bh(&ar_pci->ce_lock);
  986. +
  987. +@@ -730,7 +750,6 @@ void ath10k_ce_per_engine_service(struct
  988. + ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK);
  989. +
  990. + spin_unlock_bh(&ar_pci->ce_lock);
  991. +- ath10k_pci_sleep(ar);
  992. + }
  993. +
  994. + /*
  995. +@@ -741,13 +760,9 @@ void ath10k_ce_per_engine_service(struct
  996. +
  997. + void ath10k_ce_per_engine_service_any(struct ath10k *ar)
  998. + {
  999. +- int ce_id, ret;
  1000. ++ int ce_id;
  1001. + u32 intr_summary;
  1002. +
  1003. +- ret = ath10k_pci_wake(ar);
  1004. +- if (ret)
  1005. +- return;
  1006. +-
  1007. + intr_summary = CE_INTERRUPT_SUMMARY(ar);
  1008. +
  1009. + for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {
  1010. +@@ -759,8 +774,6 @@ void ath10k_ce_per_engine_service_any(st
  1011. +
  1012. + ath10k_ce_per_engine_service(ar, ce_id);
  1013. + }
  1014. +-
  1015. +- ath10k_pci_sleep(ar);
  1016. + }
  1017. +
  1018. + /*
  1019. +@@ -770,16 +783,11 @@ void ath10k_ce_per_engine_service_any(st
  1020. + *
  1021. + * Called with ce_lock held.
  1022. + */
  1023. +-static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state,
  1024. +- int disable_copy_compl_intr)
  1025. ++static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state)
  1026. + {
  1027. + u32 ctrl_addr = ce_state->ctrl_addr;
  1028. + struct ath10k *ar = ce_state->ar;
  1029. +- int ret;
  1030. +-
  1031. +- ret = ath10k_pci_wake(ar);
  1032. +- if (ret)
  1033. +- return;
  1034. ++ bool disable_copy_compl_intr = ce_state->attr_flags & CE_ATTR_DIS_INTR;
  1035. +
  1036. + if ((!disable_copy_compl_intr) &&
  1037. + (ce_state->send_cb || ce_state->recv_cb))
  1038. +@@ -788,54 +796,33 @@ static void ath10k_ce_per_engine_handler
  1039. + ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
  1040. +
  1041. + ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
  1042. +-
  1043. +- ath10k_pci_sleep(ar);
  1044. + }
  1045. +
  1046. + int ath10k_ce_disable_interrupts(struct ath10k *ar)
  1047. + {
  1048. +- int ce_id, ret;
  1049. +-
  1050. +- ret = ath10k_pci_wake(ar);
  1051. +- if (ret)
  1052. +- return ret;
  1053. ++ int ce_id;
  1054. +
  1055. + for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
  1056. +- u32 ctrl_addr = ath10k_ce_base_address(ce_id);
  1057. ++ u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
  1058. +
  1059. + ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
  1060. + ath10k_ce_error_intr_disable(ar, ctrl_addr);
  1061. + ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
  1062. + }
  1063. +
  1064. +- ath10k_pci_sleep(ar);
  1065. +-
  1066. + return 0;
  1067. + }
  1068. +
  1069. +-void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
  1070. +- void (*send_cb)(struct ath10k_ce_pipe *),
  1071. +- int disable_interrupts)
  1072. ++void ath10k_ce_enable_interrupts(struct ath10k *ar)
  1073. + {
  1074. +- struct ath10k *ar = ce_state->ar;
  1075. + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  1076. ++ int ce_id;
  1077. +
  1078. +- spin_lock_bh(&ar_pci->ce_lock);
  1079. +- ce_state->send_cb = send_cb;
  1080. +- ath10k_ce_per_engine_handler_adjust(ce_state, disable_interrupts);
  1081. +- spin_unlock_bh(&ar_pci->ce_lock);
  1082. +-}
  1083. +-
  1084. +-void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
  1085. +- void (*recv_cb)(struct ath10k_ce_pipe *))
  1086. +-{
  1087. +- struct ath10k *ar = ce_state->ar;
  1088. +- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  1089. +-
  1090. +- spin_lock_bh(&ar_pci->ce_lock);
  1091. +- ce_state->recv_cb = recv_cb;
  1092. +- ath10k_ce_per_engine_handler_adjust(ce_state, 0);
  1093. +- spin_unlock_bh(&ar_pci->ce_lock);
  1094. ++ /* Skip the last copy engine, CE7 the diagnostic window, as that
  1095. ++ * uses polling and isn't initialized for interrupts.
  1096. ++ */
  1097. ++ for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++)
  1098. ++ ath10k_ce_per_engine_handler_adjust(&ar_pci->ce_states[ce_id]);
  1099. + }
  1100. +
  1101. + static int ath10k_ce_init_src_ring(struct ath10k *ar,
  1102. +@@ -845,12 +832,12 @@ static int ath10k_ce_init_src_ring(struc
  1103. + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  1104. + struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
  1105. + struct ath10k_ce_ring *src_ring = ce_state->src_ring;
  1106. +- u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
  1107. ++ u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);
  1108. +
  1109. + nentries = roundup_pow_of_two(attr->src_nentries);
  1110. +
  1111. +- memset(src_ring->per_transfer_context, 0,
  1112. +- nentries * sizeof(*src_ring->per_transfer_context));
  1113. ++ memset(src_ring->base_addr_owner_space, 0,
  1114. ++ nentries * sizeof(struct ce_desc));
  1115. +
  1116. + src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
  1117. + src_ring->sw_index &= src_ring->nentries_mask;
  1118. +@@ -868,7 +855,7 @@ static int ath10k_ce_init_src_ring(struc
  1119. + ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
  1120. + ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
  1121. +
  1122. +- ath10k_dbg(ATH10K_DBG_BOOT,
  1123. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT,
  1124. + "boot init ce src ring id %d entries %d base_addr %p\n",
  1125. + ce_id, nentries, src_ring->base_addr_owner_space);
  1126. +
  1127. +@@ -882,12 +869,12 @@ static int ath10k_ce_init_dest_ring(stru
  1128. + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  1129. + struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
  1130. + struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
  1131. +- u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
  1132. ++ u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);
  1133. +
  1134. + nentries = roundup_pow_of_two(attr->dest_nentries);
  1135. +
  1136. +- memset(dest_ring->per_transfer_context, 0,
  1137. +- nentries * sizeof(*dest_ring->per_transfer_context));
  1138. ++ memset(dest_ring->base_addr_owner_space, 0,
  1139. ++ nentries * sizeof(struct ce_desc));
  1140. +
  1141. + dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
  1142. + dest_ring->sw_index &= dest_ring->nentries_mask;
  1143. +@@ -902,7 +889,7 @@ static int ath10k_ce_init_dest_ring(stru
  1144. + ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
  1145. + ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
  1146. +
  1147. +- ath10k_dbg(ATH10K_DBG_BOOT,
  1148. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT,
  1149. + "boot ce dest ring id %d entries %d base_addr %p\n",
  1150. + ce_id, nentries, dest_ring->base_addr_owner_space);
  1151. +
  1152. +@@ -1039,59 +1026,32 @@ ath10k_ce_alloc_dest_ring(struct ath10k
  1153. + int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
  1154. + const struct ce_attr *attr)
  1155. + {
  1156. +- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  1157. +- struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
  1158. + int ret;
  1159. +
  1160. +- /*
  1161. +- * Make sure there's enough CE ringbuffer entries for HTT TX to avoid
  1162. +- * additional TX locking checks.
  1163. +- *
  1164. +- * For the lack of a better place do the check here.
  1165. +- */
  1166. +- BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC >
  1167. +- (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
  1168. +- BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC >
  1169. +- (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
  1170. +-
  1171. +- ret = ath10k_pci_wake(ar);
  1172. +- if (ret)
  1173. +- return ret;
  1174. +-
  1175. +- spin_lock_bh(&ar_pci->ce_lock);
  1176. +- ce_state->ar = ar;
  1177. +- ce_state->id = ce_id;
  1178. +- ce_state->ctrl_addr = ath10k_ce_base_address(ce_id);
  1179. +- ce_state->attr_flags = attr->flags;
  1180. +- ce_state->src_sz_max = attr->src_sz_max;
  1181. +- spin_unlock_bh(&ar_pci->ce_lock);
  1182. +-
  1183. + if (attr->src_nentries) {
  1184. + ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
  1185. + if (ret) {
  1186. +- ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
  1187. ++ ath10k_err(ar, "Failed to initialize CE src ring for ID: %d (%d)\n",
  1188. + ce_id, ret);
  1189. +- goto out;
  1190. ++ return ret;
  1191. + }
  1192. + }
  1193. +
  1194. + if (attr->dest_nentries) {
  1195. + ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
  1196. + if (ret) {
  1197. +- ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
  1198. ++ ath10k_err(ar, "Failed to initialize CE dest ring for ID: %d (%d)\n",
  1199. + ce_id, ret);
  1200. +- goto out;
  1201. ++ return ret;
  1202. + }
  1203. + }
  1204. +
  1205. +-out:
  1206. +- ath10k_pci_sleep(ar);
  1207. +- return ret;
  1208. ++ return 0;
  1209. + }
  1210. +
  1211. + static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
  1212. + {
  1213. +- u32 ctrl_addr = ath10k_ce_base_address(ce_id);
  1214. ++ u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
  1215. +
  1216. + ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
  1217. + ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
  1218. +@@ -1101,7 +1061,7 @@ static void ath10k_ce_deinit_src_ring(st
  1219. +
  1220. + static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
  1221. + {
  1222. +- u32 ctrl_addr = ath10k_ce_base_address(ce_id);
  1223. ++ u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
  1224. +
  1225. + ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
  1226. + ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
  1227. +@@ -1110,30 +1070,49 @@ static void ath10k_ce_deinit_dest_ring(s
  1228. +
  1229. + void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
  1230. + {
  1231. +- int ret;
  1232. +-
  1233. +- ret = ath10k_pci_wake(ar);
  1234. +- if (ret)
  1235. +- return;
  1236. +-
  1237. + ath10k_ce_deinit_src_ring(ar, ce_id);
  1238. + ath10k_ce_deinit_dest_ring(ar, ce_id);
  1239. +-
  1240. +- ath10k_pci_sleep(ar);
  1241. + }
  1242. +
  1243. + int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
  1244. +- const struct ce_attr *attr)
  1245. ++ const struct ce_attr *attr,
  1246. ++ void (*send_cb)(struct ath10k_ce_pipe *),
  1247. ++ void (*recv_cb)(struct ath10k_ce_pipe *))
  1248. + {
  1249. + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  1250. + struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
  1251. + int ret;
  1252. +
  1253. ++ /*
  1254. ++ * Make sure there's enough CE ringbuffer entries for HTT TX to avoid
  1255. ++ * additional TX locking checks.
  1256. ++ *
  1257. ++ * For the lack of a better place do the check here.
  1258. ++ */
  1259. ++ BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC >
  1260. ++ (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
  1261. ++ BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC >
  1262. ++ (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
  1263. ++ BUILD_BUG_ON(2*TARGET_TLV_NUM_MSDU_DESC >
  1264. ++ (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
  1265. ++
  1266. ++ ce_state->ar = ar;
  1267. ++ ce_state->id = ce_id;
  1268. ++ ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
  1269. ++ ce_state->attr_flags = attr->flags;
  1270. ++ ce_state->src_sz_max = attr->src_sz_max;
  1271. ++
  1272. ++ if (attr->src_nentries)
  1273. ++ ce_state->send_cb = send_cb;
  1274. ++
  1275. ++ if (attr->dest_nentries)
  1276. ++ ce_state->recv_cb = recv_cb;
  1277. ++
  1278. + if (attr->src_nentries) {
  1279. + ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
  1280. + if (IS_ERR(ce_state->src_ring)) {
  1281. + ret = PTR_ERR(ce_state->src_ring);
  1282. +- ath10k_err("failed to allocate copy engine source ring %d: %d\n",
  1283. ++ ath10k_err(ar, "failed to allocate copy engine source ring %d: %d\n",
  1284. + ce_id, ret);
  1285. + ce_state->src_ring = NULL;
  1286. + return ret;
  1287. +@@ -1145,7 +1124,7 @@ int ath10k_ce_alloc_pipe(struct ath10k *
  1288. + attr);
  1289. + if (IS_ERR(ce_state->dest_ring)) {
  1290. + ret = PTR_ERR(ce_state->dest_ring);
  1291. +- ath10k_err("failed to allocate copy engine destination ring %d: %d\n",
  1292. ++ ath10k_err(ar, "failed to allocate copy engine destination ring %d: %d\n",
  1293. + ce_id, ret);
  1294. + ce_state->dest_ring = NULL;
  1295. + return ret;
  1296. +--- a/drivers/net/wireless/ath/ath10k/ce.h
  1297. ++++ b/drivers/net/wireless/ath/ath10k/ce.h
  1298. +@@ -20,7 +20,6 @@
  1299. +
  1300. + #include "hif.h"
  1301. +
  1302. +-
  1303. + /* Maximum number of Copy Engine's supported */
  1304. + #define CE_COUNT_MAX 8
  1305. + #define CE_HTT_H2T_MSG_SRC_NENTRIES 4096
  1306. +@@ -37,11 +36,10 @@
  1307. +
  1308. + struct ath10k_ce_pipe;
  1309. +
  1310. +-
  1311. + #define CE_DESC_FLAGS_GATHER (1 << 0)
  1312. + #define CE_DESC_FLAGS_BYTE_SWAP (1 << 1)
  1313. + #define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC
  1314. +-#define CE_DESC_FLAGS_META_DATA_LSB 3
  1315. ++#define CE_DESC_FLAGS_META_DATA_LSB 2
  1316. +
  1317. + struct ce_desc {
  1318. + __le32 addr;
  1319. +@@ -160,30 +158,15 @@ int ath10k_ce_send_nolock(struct ath10k_
  1320. + unsigned int transfer_id,
  1321. + unsigned int flags);
  1322. +
  1323. +-void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
  1324. +- void (*send_cb)(struct ath10k_ce_pipe *),
  1325. +- int disable_interrupts);
  1326. ++void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe);
  1327. +
  1328. + int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);
  1329. +
  1330. + /*==================Recv=======================*/
  1331. +
  1332. +-/*
  1333. +- * Make a buffer available to receive. The buffer must be at least of a
  1334. +- * minimal size appropriate for this copy engine (src_sz_max attribute).
  1335. +- * ce - which copy engine to use
  1336. +- * per_transfer_recv_context - context passed back to caller's recv_cb
  1337. +- * buffer - address of buffer in CE space
  1338. +- * Returns 0 on success; otherwise an error status.
  1339. +- *
  1340. +- * Implemenation note: Pushes a buffer to Dest ring.
  1341. +- */
  1342. +-int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
  1343. +- void *per_transfer_recv_context,
  1344. +- u32 buffer);
  1345. +-
  1346. +-void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
  1347. +- void (*recv_cb)(struct ath10k_ce_pipe *));
  1348. ++int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe);
  1349. ++int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
  1350. ++int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
  1351. +
  1352. + /* recv flags */
  1353. + /* Data is byte-swapped */
  1354. +@@ -204,10 +187,16 @@ int ath10k_ce_completed_recv_next(struct
  1355. + * Pops 1 completed send buffer from Source ring.
  1356. + */
  1357. + int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
  1358. +- void **per_transfer_contextp,
  1359. +- u32 *bufferp,
  1360. +- unsigned int *nbytesp,
  1361. +- unsigned int *transfer_idp);
  1362. ++ void **per_transfer_contextp,
  1363. ++ u32 *bufferp,
  1364. ++ unsigned int *nbytesp,
  1365. ++ unsigned int *transfer_idp);
  1366. ++
  1367. ++int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
  1368. ++ void **per_transfer_contextp,
  1369. ++ u32 *bufferp,
  1370. ++ unsigned int *nbytesp,
  1371. ++ unsigned int *transfer_idp);
  1372. +
  1373. + /*==================CE Engine Initialization=======================*/
  1374. +
  1375. +@@ -215,7 +204,9 @@ int ath10k_ce_init_pipe(struct ath10k *a
  1376. + const struct ce_attr *attr);
  1377. + void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id);
  1378. + int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
  1379. +- const struct ce_attr *attr);
  1380. ++ const struct ce_attr *attr,
  1381. ++ void (*send_cb)(struct ath10k_ce_pipe *),
  1382. ++ void (*recv_cb)(struct ath10k_ce_pipe *));
  1383. + void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);
  1384. +
  1385. + /*==================CE Engine Shutdown=======================*/
  1386. +@@ -228,6 +219,13 @@ int ath10k_ce_revoke_recv_next(struct at
  1387. + void **per_transfer_contextp,
  1388. + u32 *bufferp);
  1389. +
  1390. ++int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
  1391. ++ void **per_transfer_contextp,
  1392. ++ u32 *bufferp,
  1393. ++ unsigned int *nbytesp,
  1394. ++ unsigned int *transfer_idp,
  1395. ++ unsigned int *flagsp);
  1396. ++
  1397. + /*
  1398. + * Support clean shutdown by allowing the caller to cancel
  1399. + * pending sends. Target DMA must be stopped before using
  1400. +@@ -243,6 +241,7 @@ int ath10k_ce_cancel_send_next(struct at
  1401. + void ath10k_ce_per_engine_service_any(struct ath10k *ar);
  1402. + void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
  1403. + int ath10k_ce_disable_interrupts(struct ath10k *ar);
  1404. ++void ath10k_ce_enable_interrupts(struct ath10k *ar);
  1405. +
  1406. + /* ce_attr.flags values */
  1407. + /* Use NonSnooping PCIe accesses? */
  1408. +@@ -395,8 +394,7 @@ struct ce_attr {
  1409. + #define DST_WATERMARK_HIGH_RESET 0
  1410. + #define DST_WATERMARK_ADDRESS 0x0050
  1411. +
  1412. +-
  1413. +-static inline u32 ath10k_ce_base_address(unsigned int ce_id)
  1414. ++static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
  1415. + {
  1416. + return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
  1417. + }
  1418. +--- a/drivers/net/wireless/ath/ath10k/core.c
  1419. ++++ b/drivers/net/wireless/ath/ath10k/core.c
  1420. +@@ -17,6 +17,7 @@
  1421. +
  1422. + #include <linux/module.h>
  1423. + #include <linux/firmware.h>
  1424. ++#include <linux/of.h>
  1425. +
  1426. + #include "core.h"
  1427. + #include "mac.h"
  1428. +@@ -26,68 +27,88 @@
  1429. + #include "bmi.h"
  1430. + #include "debug.h"
  1431. + #include "htt.h"
  1432. ++#include "testmode.h"
  1433. ++#include "wmi-ops.h"
  1434. +
  1435. + unsigned int ath10k_debug_mask;
  1436. + static bool uart_print;
  1437. +-static unsigned int ath10k_p2p;
  1438. ++static bool skip_otp;
  1439. ++
  1440. + module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
  1441. + module_param(uart_print, bool, 0644);
  1442. +-module_param_named(p2p, ath10k_p2p, uint, 0644);
  1443. ++module_param(skip_otp, bool, 0644);
  1444. ++
  1445. + MODULE_PARM_DESC(debug_mask, "Debugging mask");
  1446. + MODULE_PARM_DESC(uart_print, "Uart target debugging");
  1447. +-MODULE_PARM_DESC(p2p, "Enable ath10k P2P support");
  1448. ++MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode");
  1449. +
  1450. + static const struct ath10k_hw_params ath10k_hw_params_list[] = {
  1451. + {
  1452. + .id = QCA988X_HW_2_0_VERSION,
  1453. + .name = "qca988x hw2.0",
  1454. + .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
  1455. ++ .uart_pin = 7,
  1456. + .fw = {
  1457. + .dir = QCA988X_HW_2_0_FW_DIR,
  1458. + .fw = QCA988X_HW_2_0_FW_FILE,
  1459. + .otp = QCA988X_HW_2_0_OTP_FILE,
  1460. + .board = QCA988X_HW_2_0_BOARD_DATA_FILE,
  1461. ++ .board_size = QCA988X_BOARD_DATA_SZ,
  1462. ++ .board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
  1463. ++ },
  1464. ++ },
  1465. ++ {
  1466. ++ .id = QCA6174_HW_2_1_VERSION,
  1467. ++ .name = "qca6174 hw2.1",
  1468. ++ .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
  1469. ++ .uart_pin = 6,
  1470. ++ .fw = {
  1471. ++ .dir = QCA6174_HW_2_1_FW_DIR,
  1472. ++ .fw = QCA6174_HW_2_1_FW_FILE,
  1473. ++ .otp = QCA6174_HW_2_1_OTP_FILE,
  1474. ++ .board = QCA6174_HW_2_1_BOARD_DATA_FILE,
  1475. ++ .board_size = QCA6174_BOARD_DATA_SZ,
  1476. ++ .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
  1477. ++ },
  1478. ++ },
  1479. ++ {
  1480. ++ .id = QCA6174_HW_3_0_VERSION,
  1481. ++ .name = "qca6174 hw3.0",
  1482. ++ .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
  1483. ++ .uart_pin = 6,
  1484. ++ .fw = {
  1485. ++ .dir = QCA6174_HW_3_0_FW_DIR,
  1486. ++ .fw = QCA6174_HW_3_0_FW_FILE,
  1487. ++ .otp = QCA6174_HW_3_0_OTP_FILE,
  1488. ++ .board = QCA6174_HW_3_0_BOARD_DATA_FILE,
  1489. ++ .board_size = QCA6174_BOARD_DATA_SZ,
  1490. ++ .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
  1491. ++ },
  1492. ++ },
  1493. ++ {
  1494. ++ .id = QCA6174_HW_3_2_VERSION,
  1495. ++ .name = "qca6174 hw3.2",
  1496. ++ .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
  1497. ++ .uart_pin = 6,
  1498. ++ .fw = {
  1499. ++ /* uses same binaries as hw3.0 */
  1500. ++ .dir = QCA6174_HW_3_0_FW_DIR,
  1501. ++ .fw = QCA6174_HW_3_0_FW_FILE,
  1502. ++ .otp = QCA6174_HW_3_0_OTP_FILE,
  1503. ++ .board = QCA6174_HW_3_0_BOARD_DATA_FILE,
  1504. ++ .board_size = QCA6174_BOARD_DATA_SZ,
  1505. ++ .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
  1506. + },
  1507. + },
  1508. + };
  1509. +
  1510. + static void ath10k_send_suspend_complete(struct ath10k *ar)
  1511. + {
  1512. +- ath10k_dbg(ATH10K_DBG_BOOT, "boot suspend complete\n");
  1513. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot suspend complete\n");
  1514. +
  1515. + complete(&ar->target_suspend);
  1516. + }
  1517. +
  1518. +-static int ath10k_init_connect_htc(struct ath10k *ar)
  1519. +-{
  1520. +- int status;
  1521. +-
  1522. +- status = ath10k_wmi_connect_htc_service(ar);
  1523. +- if (status)
  1524. +- goto conn_fail;
  1525. +-
  1526. +- /* Start HTC */
  1527. +- status = ath10k_htc_start(&ar->htc);
  1528. +- if (status)
  1529. +- goto conn_fail;
  1530. +-
  1531. +- /* Wait for WMI event to be ready */
  1532. +- status = ath10k_wmi_wait_for_service_ready(ar);
  1533. +- if (status <= 0) {
  1534. +- ath10k_warn("wmi service ready event not received");
  1535. +- status = -ETIMEDOUT;
  1536. +- goto timeout;
  1537. +- }
  1538. +-
  1539. +- ath10k_dbg(ATH10K_DBG_BOOT, "boot wmi ready\n");
  1540. +- return 0;
  1541. +-
  1542. +-timeout:
  1543. +- ath10k_htc_stop(&ar->htc);
  1544. +-conn_fail:
  1545. +- return status;
  1546. +-}
  1547. +-
  1548. + static int ath10k_init_configure_target(struct ath10k *ar)
  1549. + {
  1550. + u32 param_host;
  1551. +@@ -97,14 +118,14 @@ static int ath10k_init_configure_target(
  1552. + ret = ath10k_bmi_write32(ar, hi_app_host_interest,
  1553. + HTC_PROTOCOL_VERSION);
  1554. + if (ret) {
  1555. +- ath10k_err("settings HTC version failed\n");
  1556. ++ ath10k_err(ar, "settings HTC version failed\n");
  1557. + return ret;
  1558. + }
  1559. +
  1560. + /* set the firmware mode to STA/IBSS/AP */
  1561. + ret = ath10k_bmi_read32(ar, hi_option_flag, &param_host);
  1562. + if (ret) {
  1563. +- ath10k_err("setting firmware mode (1/2) failed\n");
  1564. ++ ath10k_err(ar, "setting firmware mode (1/2) failed\n");
  1565. + return ret;
  1566. + }
  1567. +
  1568. +@@ -123,14 +144,14 @@ static int ath10k_init_configure_target(
  1569. +
  1570. + ret = ath10k_bmi_write32(ar, hi_option_flag, param_host);
  1571. + if (ret) {
  1572. +- ath10k_err("setting firmware mode (2/2) failed\n");
  1573. ++ ath10k_err(ar, "setting firmware mode (2/2) failed\n");
  1574. + return ret;
  1575. + }
  1576. +
  1577. + /* We do all byte-swapping on the host */
  1578. + ret = ath10k_bmi_write32(ar, hi_be, 0);
  1579. + if (ret) {
  1580. +- ath10k_err("setting host CPU BE mode failed\n");
  1581. ++ ath10k_err(ar, "setting host CPU BE mode failed\n");
  1582. + return ret;
  1583. + }
  1584. +
  1585. +@@ -138,7 +159,7 @@ static int ath10k_init_configure_target(
  1586. + ret = ath10k_bmi_write32(ar, hi_fw_swap, 0);
  1587. +
  1588. + if (ret) {
  1589. +- ath10k_err("setting FW data/desc swap flags failed\n");
  1590. ++ ath10k_err(ar, "setting FW data/desc swap flags failed\n");
  1591. + return ret;
  1592. + }
  1593. +
  1594. +@@ -167,79 +188,83 @@ static const struct firmware *ath10k_fet
  1595. + return fw;
  1596. + }
  1597. +
  1598. +-static int ath10k_push_board_ext_data(struct ath10k *ar)
  1599. ++static int ath10k_push_board_ext_data(struct ath10k *ar, const void *data,
  1600. ++ size_t data_len)
  1601. + {
  1602. +- u32 board_data_size = QCA988X_BOARD_DATA_SZ;
  1603. +- u32 board_ext_data_size = QCA988X_BOARD_EXT_DATA_SZ;
  1604. ++ u32 board_data_size = ar->hw_params.fw.board_size;
  1605. ++ u32 board_ext_data_size = ar->hw_params.fw.board_ext_size;
  1606. + u32 board_ext_data_addr;
  1607. + int ret;
  1608. +
  1609. + ret = ath10k_bmi_read32(ar, hi_board_ext_data, &board_ext_data_addr);
  1610. + if (ret) {
  1611. +- ath10k_err("could not read board ext data addr (%d)\n", ret);
  1612. ++ ath10k_err(ar, "could not read board ext data addr (%d)\n",
  1613. ++ ret);
  1614. + return ret;
  1615. + }
  1616. +
  1617. +- ath10k_dbg(ATH10K_DBG_BOOT,
  1618. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT,
  1619. + "boot push board extended data addr 0x%x\n",
  1620. + board_ext_data_addr);
  1621. +
  1622. + if (board_ext_data_addr == 0)
  1623. + return 0;
  1624. +
  1625. +- if (ar->board_len != (board_data_size + board_ext_data_size)) {
  1626. +- ath10k_err("invalid board (ext) data sizes %zu != %d+%d\n",
  1627. +- ar->board_len, board_data_size, board_ext_data_size);
  1628. ++ if (data_len != (board_data_size + board_ext_data_size)) {
  1629. ++ ath10k_err(ar, "invalid board (ext) data sizes %zu != %d+%d\n",
  1630. ++ data_len, board_data_size, board_ext_data_size);
  1631. + return -EINVAL;
  1632. + }
  1633. +
  1634. + ret = ath10k_bmi_write_memory(ar, board_ext_data_addr,
  1635. +- ar->board_data + board_data_size,
  1636. ++ data + board_data_size,
  1637. + board_ext_data_size);
  1638. + if (ret) {
  1639. +- ath10k_err("could not write board ext data (%d)\n", ret);
  1640. ++ ath10k_err(ar, "could not write board ext data (%d)\n", ret);
  1641. + return ret;
  1642. + }
  1643. +
  1644. + ret = ath10k_bmi_write32(ar, hi_board_ext_data_config,
  1645. + (board_ext_data_size << 16) | 1);
  1646. + if (ret) {
  1647. +- ath10k_err("could not write board ext data bit (%d)\n", ret);
  1648. ++ ath10k_err(ar, "could not write board ext data bit (%d)\n",
  1649. ++ ret);
  1650. + return ret;
  1651. + }
  1652. +
  1653. + return 0;
  1654. + }
  1655. +
  1656. +-static int ath10k_download_board_data(struct ath10k *ar)
  1657. ++static int ath10k_download_board_data(struct ath10k *ar, const void *data,
  1658. ++ size_t data_len)
  1659. + {
  1660. +- u32 board_data_size = QCA988X_BOARD_DATA_SZ;
  1661. ++ u32 board_data_size = ar->hw_params.fw.board_size;
  1662. + u32 address;
  1663. + int ret;
  1664. +
  1665. +- ret = ath10k_push_board_ext_data(ar);
  1666. ++ ret = ath10k_push_board_ext_data(ar, data, data_len);
  1667. + if (ret) {
  1668. +- ath10k_err("could not push board ext data (%d)\n", ret);
  1669. ++ ath10k_err(ar, "could not push board ext data (%d)\n", ret);
  1670. + goto exit;
  1671. + }
  1672. +
  1673. + ret = ath10k_bmi_read32(ar, hi_board_data, &address);
  1674. + if (ret) {
  1675. +- ath10k_err("could not read board data addr (%d)\n", ret);
  1676. ++ ath10k_err(ar, "could not read board data addr (%d)\n", ret);
  1677. + goto exit;
  1678. + }
  1679. +
  1680. +- ret = ath10k_bmi_write_memory(ar, address, ar->board_data,
  1681. ++ ret = ath10k_bmi_write_memory(ar, address, data,
  1682. + min_t(u32, board_data_size,
  1683. +- ar->board_len));
  1684. ++ data_len));
  1685. + if (ret) {
  1686. +- ath10k_err("could not write board data (%d)\n", ret);
  1687. ++ ath10k_err(ar, "could not write board data (%d)\n", ret);
  1688. + goto exit;
  1689. + }
  1690. +
  1691. + ret = ath10k_bmi_write32(ar, hi_board_data_initialized, 1);
  1692. + if (ret) {
  1693. +- ath10k_err("could not write board data bit (%d)\n", ret);
  1694. ++ ath10k_err(ar, "could not write board data bit (%d)\n", ret);
  1695. + goto exit;
  1696. + }
  1697. +
  1698. +@@ -247,73 +272,182 @@ exit:
  1699. + return ret;
  1700. + }
  1701. +
  1702. ++static int ath10k_download_cal_file(struct ath10k *ar)
  1703. ++{
  1704. ++ int ret;
  1705. ++
  1706. ++ if (!ar->cal_file)
  1707. ++ return -ENOENT;
  1708. ++
  1709. ++ if (IS_ERR(ar->cal_file))
  1710. ++ return PTR_ERR(ar->cal_file);
  1711. ++
  1712. ++ ret = ath10k_download_board_data(ar, ar->cal_file->data,
  1713. ++ ar->cal_file->size);
  1714. ++ if (ret) {
  1715. ++ ath10k_err(ar, "failed to download cal_file data: %d\n", ret);
  1716. ++ return ret;
  1717. ++ }
  1718. ++
  1719. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cal file downloaded\n");
  1720. ++
  1721. ++ return 0;
  1722. ++}
  1723. ++
  1724. ++static int ath10k_download_cal_dt(struct ath10k *ar)
  1725. ++{
  1726. ++ struct device_node *node;
  1727. ++ int data_len;
  1728. ++ void *data;
  1729. ++ int ret;
  1730. ++
  1731. ++ node = ar->dev->of_node;
  1732. ++ if (!node)
  1733. ++ /* Device Tree is optional, don't print any warnings if
  1734. ++ * there's no node for ath10k.
  1735. ++ */
  1736. ++ return -ENOENT;
  1737. ++
  1738. ++ if (!of_get_property(node, "qcom,ath10k-calibration-data",
  1739. ++ &data_len)) {
  1740. ++ /* The calibration data node is optional */
  1741. ++ return -ENOENT;
  1742. ++ }
  1743. ++
  1744. ++ if (data_len != QCA988X_CAL_DATA_LEN) {
  1745. ++ ath10k_warn(ar, "invalid calibration data length in DT: %d\n",
  1746. ++ data_len);
  1747. ++ ret = -EMSGSIZE;
  1748. ++ goto out;
  1749. ++ }
  1750. ++
  1751. ++ data = kmalloc(data_len, GFP_KERNEL);
  1752. ++ if (!data) {
  1753. ++ ret = -ENOMEM;
  1754. ++ goto out;
  1755. ++ }
  1756. ++
  1757. ++ ret = of_property_read_u8_array(node, "qcom,ath10k-calibration-data",
  1758. ++ data, data_len);
  1759. ++ if (ret) {
  1760. ++ ath10k_warn(ar, "failed to read calibration data from DT: %d\n",
  1761. ++ ret);
  1762. ++ goto out_free;
  1763. ++ }
  1764. ++
  1765. ++ ret = ath10k_download_board_data(ar, data, data_len);
  1766. ++ if (ret) {
  1767. ++ ath10k_warn(ar, "failed to download calibration data from Device Tree: %d\n",
  1768. ++ ret);
  1769. ++ goto out_free;
  1770. ++ }
  1771. ++
  1772. ++ ret = 0;
  1773. ++
  1774. ++out_free:
  1775. ++ kfree(data);
  1776. ++
  1777. ++out:
  1778. ++ return ret;
  1779. ++}
  1780. ++
  1781. + static int ath10k_download_and_run_otp(struct ath10k *ar)
  1782. + {
  1783. + u32 result, address = ar->hw_params.patch_load_addr;
  1784. + int ret;
  1785. +
  1786. ++ ret = ath10k_download_board_data(ar, ar->board_data, ar->board_len);
  1787. ++ if (ret) {
  1788. ++ ath10k_err(ar, "failed to download board data: %d\n", ret);
  1789. ++ return ret;
  1790. ++ }
  1791. ++
  1792. + /* OTP is optional */
  1793. +
  1794. + if (!ar->otp_data || !ar->otp_len) {
  1795. +- ath10k_warn("Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n",
  1796. ++ ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n",
  1797. + ar->otp_data, ar->otp_len);
  1798. + return 0;
  1799. + }
  1800. +
  1801. +- ath10k_dbg(ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n",
  1802. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n",
  1803. + address, ar->otp_len);
  1804. +
  1805. + ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
  1806. + if (ret) {
  1807. +- ath10k_err("could not write otp (%d)\n", ret);
  1808. ++ ath10k_err(ar, "could not write otp (%d)\n", ret);
  1809. + return ret;
  1810. + }
  1811. +
  1812. + ret = ath10k_bmi_execute(ar, address, 0, &result);
  1813. + if (ret) {
  1814. +- ath10k_err("could not execute otp (%d)\n", ret);
  1815. ++ ath10k_err(ar, "could not execute otp (%d)\n", ret);
  1816. + return ret;
  1817. + }
  1818. +
  1819. +- ath10k_dbg(ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
  1820. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
  1821. +
  1822. +- if (result != 0) {
  1823. +- ath10k_err("otp calibration failed: %d", result);
  1824. ++ if (!skip_otp && result != 0) {
  1825. ++ ath10k_err(ar, "otp calibration failed: %d", result);
  1826. + return -EINVAL;
  1827. + }
  1828. +
  1829. + return 0;
  1830. + }
  1831. +
  1832. +-static int ath10k_download_fw(struct ath10k *ar)
  1833. ++static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode)
  1834. + {
  1835. +- u32 address;
  1836. ++ u32 address, data_len;
  1837. ++ const char *mode_name;
  1838. ++ const void *data;
  1839. + int ret;
  1840. +
  1841. + address = ar->hw_params.patch_load_addr;
  1842. +
  1843. +- ret = ath10k_bmi_fast_download(ar, address, ar->firmware_data,
  1844. +- ar->firmware_len);
  1845. ++ switch (mode) {
  1846. ++ case ATH10K_FIRMWARE_MODE_NORMAL:
  1847. ++ data = ar->firmware_data;
  1848. ++ data_len = ar->firmware_len;
  1849. ++ mode_name = "normal";
  1850. ++ break;
  1851. ++ case ATH10K_FIRMWARE_MODE_UTF:
  1852. ++ data = ar->testmode.utf->data;
  1853. ++ data_len = ar->testmode.utf->size;
  1854. ++ mode_name = "utf";
  1855. ++ break;
  1856. ++ default:
  1857. ++ ath10k_err(ar, "unknown firmware mode: %d\n", mode);
  1858. ++ return -EINVAL;
  1859. ++ }
  1860. ++
  1861. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT,
  1862. ++ "boot uploading firmware image %p len %d mode %s\n",
  1863. ++ data, data_len, mode_name);
  1864. ++
  1865. ++ ret = ath10k_bmi_fast_download(ar, address, data, data_len);
  1866. + if (ret) {
  1867. +- ath10k_err("could not write fw (%d)\n", ret);
  1868. +- goto exit;
  1869. ++ ath10k_err(ar, "failed to download %s firmware: %d\n",
  1870. ++ mode_name, ret);
  1871. ++ return ret;
  1872. + }
  1873. +
  1874. +-exit:
  1875. + return ret;
  1876. + }
  1877. +
  1878. + static void ath10k_core_free_firmware_files(struct ath10k *ar)
  1879. + {
  1880. +- if (ar->board && !IS_ERR(ar->board))
  1881. ++ if (!IS_ERR(ar->board))
  1882. + release_firmware(ar->board);
  1883. +
  1884. +- if (ar->otp && !IS_ERR(ar->otp))
  1885. ++ if (!IS_ERR(ar->otp))
  1886. + release_firmware(ar->otp);
  1887. +
  1888. +- if (ar->firmware && !IS_ERR(ar->firmware))
  1889. ++ if (!IS_ERR(ar->firmware))
  1890. + release_firmware(ar->firmware);
  1891. +
  1892. ++ if (!IS_ERR(ar->cal_file))
  1893. ++ release_firmware(ar->cal_file);
  1894. ++
  1895. + ar->board = NULL;
  1896. + ar->board_data = NULL;
  1897. + ar->board_len = 0;
  1898. +@@ -325,6 +459,27 @@ static void ath10k_core_free_firmware_fi
  1899. + ar->firmware = NULL;
  1900. + ar->firmware_data = NULL;
  1901. + ar->firmware_len = 0;
  1902. ++
  1903. ++ ar->cal_file = NULL;
  1904. ++}
  1905. ++
  1906. ++static int ath10k_fetch_cal_file(struct ath10k *ar)
  1907. ++{
  1908. ++ char filename[100];
  1909. ++
  1910. ++ /* cal-<bus>-<id>.bin */
  1911. ++ scnprintf(filename, sizeof(filename), "cal-%s-%s.bin",
  1912. ++ ath10k_bus_str(ar->hif.bus), dev_name(ar->dev));
  1913. ++
  1914. ++ ar->cal_file = ath10k_fetch_fw_file(ar, ATH10K_FW_DIR, filename);
  1915. ++ if (IS_ERR(ar->cal_file))
  1916. ++ /* calibration file is optional, don't print any warnings */
  1917. ++ return PTR_ERR(ar->cal_file);
  1918. ++
  1919. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "found calibration file %s/%s\n",
  1920. ++ ATH10K_FW_DIR, filename);
  1921. ++
  1922. ++ return 0;
  1923. + }
  1924. +
  1925. + static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
  1926. +@@ -332,12 +487,12 @@ static int ath10k_core_fetch_firmware_ap
  1927. + int ret = 0;
  1928. +
  1929. + if (ar->hw_params.fw.fw == NULL) {
  1930. +- ath10k_err("firmware file not defined\n");
  1931. ++ ath10k_err(ar, "firmware file not defined\n");
  1932. + return -EINVAL;
  1933. + }
  1934. +
  1935. + if (ar->hw_params.fw.board == NULL) {
  1936. +- ath10k_err("board data file not defined");
  1937. ++ ath10k_err(ar, "board data file not defined");
  1938. + return -EINVAL;
  1939. + }
  1940. +
  1941. +@@ -346,7 +501,7 @@ static int ath10k_core_fetch_firmware_ap
  1942. + ar->hw_params.fw.board);
  1943. + if (IS_ERR(ar->board)) {
  1944. + ret = PTR_ERR(ar->board);
  1945. +- ath10k_err("could not fetch board data (%d)\n", ret);
  1946. ++ ath10k_err(ar, "could not fetch board data (%d)\n", ret);
  1947. + goto err;
  1948. + }
  1949. +
  1950. +@@ -358,7 +513,7 @@ static int ath10k_core_fetch_firmware_ap
  1951. + ar->hw_params.fw.fw);
  1952. + if (IS_ERR(ar->firmware)) {
  1953. + ret = PTR_ERR(ar->firmware);
  1954. +- ath10k_err("could not fetch firmware (%d)\n", ret);
  1955. ++ ath10k_err(ar, "could not fetch firmware (%d)\n", ret);
  1956. + goto err;
  1957. + }
  1958. +
  1959. +@@ -374,7 +529,7 @@ static int ath10k_core_fetch_firmware_ap
  1960. + ar->hw_params.fw.otp);
  1961. + if (IS_ERR(ar->otp)) {
  1962. + ret = PTR_ERR(ar->otp);
  1963. +- ath10k_err("could not fetch otp (%d)\n", ret);
  1964. ++ ath10k_err(ar, "could not fetch otp (%d)\n", ret);
  1965. + goto err;
  1966. + }
  1967. +
  1968. +@@ -394,12 +549,12 @@ static int ath10k_core_fetch_firmware_ap
  1969. + int ie_id, i, index, bit, ret;
  1970. + struct ath10k_fw_ie *hdr;
  1971. + const u8 *data;
  1972. +- __le32 *timestamp;
  1973. ++ __le32 *timestamp, *version;
  1974. +
  1975. + /* first fetch the firmware file (firmware-*.bin) */
  1976. + ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name);
  1977. + if (IS_ERR(ar->firmware)) {
  1978. +- ath10k_err("could not fetch firmware file '%s/%s': %ld\n",
  1979. ++ ath10k_err(ar, "could not fetch firmware file '%s/%s': %ld\n",
  1980. + ar->hw_params.fw.dir, name, PTR_ERR(ar->firmware));
  1981. + return PTR_ERR(ar->firmware);
  1982. + }
  1983. +@@ -411,14 +566,14 @@ static int ath10k_core_fetch_firmware_ap
  1984. + magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
  1985. +
  1986. + if (len < magic_len) {
  1987. +- ath10k_err("firmware file '%s/%s' too small to contain magic: %zu\n",
  1988. ++ ath10k_err(ar, "firmware file '%s/%s' too small to contain magic: %zu\n",
  1989. + ar->hw_params.fw.dir, name, len);
  1990. + ret = -EINVAL;
  1991. + goto err;
  1992. + }
  1993. +
  1994. + if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) {
  1995. +- ath10k_err("invalid firmware magic\n");
  1996. ++ ath10k_err(ar, "invalid firmware magic\n");
  1997. + ret = -EINVAL;
  1998. + goto err;
  1999. + }
  2000. +@@ -440,7 +595,7 @@ static int ath10k_core_fetch_firmware_ap
  2001. + data += sizeof(*hdr);
  2002. +
  2003. + if (len < ie_len) {
  2004. +- ath10k_err("invalid length for FW IE %d (%zu < %zu)\n",
  2005. ++ ath10k_err(ar, "invalid length for FW IE %d (%zu < %zu)\n",
  2006. + ie_id, len, ie_len);
  2007. + ret = -EINVAL;
  2008. + goto err;
  2009. +@@ -454,7 +609,7 @@ static int ath10k_core_fetch_firmware_ap
  2010. + memcpy(ar->hw->wiphy->fw_version, data, ie_len);
  2011. + ar->hw->wiphy->fw_version[ie_len] = '\0';
  2012. +
  2013. +- ath10k_dbg(ATH10K_DBG_BOOT,
  2014. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT,
  2015. + "found fw version %s\n",
  2016. + ar->hw->wiphy->fw_version);
  2017. + break;
  2018. +@@ -464,11 +619,11 @@ static int ath10k_core_fetch_firmware_ap
  2019. +
  2020. + timestamp = (__le32 *)data;
  2021. +
  2022. +- ath10k_dbg(ATH10K_DBG_BOOT, "found fw timestamp %d\n",
  2023. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw timestamp %d\n",
  2024. + le32_to_cpup(timestamp));
  2025. + break;
  2026. + case ATH10K_FW_IE_FEATURES:
  2027. +- ath10k_dbg(ATH10K_DBG_BOOT,
  2028. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT,
  2029. + "found firmware features ie (%zd B)\n",
  2030. + ie_len);
  2031. +
  2032. +@@ -480,19 +635,19 @@ static int ath10k_core_fetch_firmware_ap
  2033. + break;
  2034. +
  2035. + if (data[index] & (1 << bit)) {
  2036. +- ath10k_dbg(ATH10K_DBG_BOOT,
  2037. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT,
  2038. + "Enabling feature bit: %i\n",
  2039. + i);
  2040. + __set_bit(i, ar->fw_features);
  2041. + }
  2042. + }
  2043. +
  2044. +- ath10k_dbg_dump(ATH10K_DBG_BOOT, "features", "",
  2045. ++ ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "features", "",
  2046. + ar->fw_features,
  2047. + sizeof(ar->fw_features));
  2048. + break;
  2049. + case ATH10K_FW_IE_FW_IMAGE:
  2050. +- ath10k_dbg(ATH10K_DBG_BOOT,
  2051. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT,
  2052. + "found fw image ie (%zd B)\n",
  2053. + ie_len);
  2054. +
  2055. +@@ -501,7 +656,7 @@ static int ath10k_core_fetch_firmware_ap
  2056. +
  2057. + break;
  2058. + case ATH10K_FW_IE_OTP_IMAGE:
  2059. +- ath10k_dbg(ATH10K_DBG_BOOT,
  2060. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT,
  2061. + "found otp image ie (%zd B)\n",
  2062. + ie_len);
  2063. +
  2064. +@@ -509,8 +664,19 @@ static int ath10k_core_fetch_firmware_ap
  2065. + ar->otp_len = ie_len;
  2066. +
  2067. + break;
  2068. ++ case ATH10K_FW_IE_WMI_OP_VERSION:
  2069. ++ if (ie_len != sizeof(u32))
  2070. ++ break;
  2071. ++
  2072. ++ version = (__le32 *)data;
  2073. ++
  2074. ++ ar->wmi.op_version = le32_to_cpup(version);
  2075. ++
  2076. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie wmi op version %d\n",
  2077. ++ ar->wmi.op_version);
  2078. ++ break;
  2079. + default:
  2080. +- ath10k_warn("Unknown FW IE: %u\n",
  2081. ++ ath10k_warn(ar, "Unknown FW IE: %u\n",
  2082. + le32_to_cpu(hdr->id));
  2083. + break;
  2084. + }
  2085. +@@ -523,7 +689,7 @@ static int ath10k_core_fetch_firmware_ap
  2086. + }
  2087. +
  2088. + if (!ar->firmware_data || !ar->firmware_len) {
  2089. +- ath10k_warn("No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
  2090. ++ ath10k_warn(ar, "No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
  2091. + ar->hw_params.fw.dir, name);
  2092. + ret = -ENOMEDIUM;
  2093. + goto err;
  2094. +@@ -531,7 +697,7 @@ static int ath10k_core_fetch_firmware_ap
  2095. +
  2096. + /* now fetch the board file */
  2097. + if (ar->hw_params.fw.board == NULL) {
  2098. +- ath10k_err("board data file not defined");
  2099. ++ ath10k_err(ar, "board data file not defined");
  2100. + ret = -EINVAL;
  2101. + goto err;
  2102. + }
  2103. +@@ -541,7 +707,7 @@ static int ath10k_core_fetch_firmware_ap
  2104. + ar->hw_params.fw.board);
  2105. + if (IS_ERR(ar->board)) {
  2106. + ret = PTR_ERR(ar->board);
  2107. +- ath10k_err("could not fetch board data '%s/%s' (%d)\n",
  2108. ++ ath10k_err(ar, "could not fetch board data '%s/%s' (%d)\n",
  2109. + ar->hw_params.fw.dir, ar->hw_params.fw.board,
  2110. + ret);
  2111. + goto err;
  2112. +@@ -561,49 +727,79 @@ static int ath10k_core_fetch_firmware_fi
  2113. + {
  2114. + int ret;
  2115. +
  2116. ++ /* calibration file is optional, don't check for any errors */
  2117. ++ ath10k_fetch_cal_file(ar);
  2118. ++
  2119. ++ ar->fw_api = 4;
  2120. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
  2121. ++
  2122. ++ ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API4_FILE);
  2123. ++ if (ret == 0)
  2124. ++ goto success;
  2125. ++
  2126. ++ ar->fw_api = 3;
  2127. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
  2128. ++
  2129. ++ ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API3_FILE);
  2130. ++ if (ret == 0)
  2131. ++ goto success;
  2132. ++
  2133. + ar->fw_api = 2;
  2134. +- ath10k_dbg(ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
  2135. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
  2136. +
  2137. + ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE);
  2138. + if (ret == 0)
  2139. + goto success;
  2140. +
  2141. + ar->fw_api = 1;
  2142. +- ath10k_dbg(ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
  2143. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
  2144. +
  2145. + ret = ath10k_core_fetch_firmware_api_1(ar);
  2146. + if (ret)
  2147. + return ret;
  2148. +
  2149. + success:
  2150. +- ath10k_dbg(ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
  2151. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
  2152. +
  2153. + return 0;
  2154. + }
  2155. +
  2156. +-static int ath10k_init_download_firmware(struct ath10k *ar)
  2157. ++static int ath10k_download_cal_data(struct ath10k *ar)
  2158. + {
  2159. + int ret;
  2160. +
  2161. +- ret = ath10k_download_board_data(ar);
  2162. +- if (ret) {
  2163. +- ath10k_err("failed to download board data: %d\n", ret);
  2164. +- return ret;
  2165. ++ ret = ath10k_download_cal_file(ar);
  2166. ++ if (ret == 0) {
  2167. ++ ar->cal_mode = ATH10K_CAL_MODE_FILE;
  2168. ++ goto done;
  2169. + }
  2170. +
  2171. +- ret = ath10k_download_and_run_otp(ar);
  2172. +- if (ret) {
  2173. +- ath10k_err("failed to run otp: %d\n", ret);
  2174. +- return ret;
  2175. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT,
  2176. ++ "boot did not find a calibration file, try DT next: %d\n",
  2177. ++ ret);
  2178. ++
  2179. ++ ret = ath10k_download_cal_dt(ar);
  2180. ++ if (ret == 0) {
  2181. ++ ar->cal_mode = ATH10K_CAL_MODE_DT;
  2182. ++ goto done;
  2183. + }
  2184. +
  2185. +- ret = ath10k_download_fw(ar);
  2186. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT,
  2187. ++ "boot did not find DT entry, try OTP next: %d\n",
  2188. ++ ret);
  2189. ++
  2190. ++ ret = ath10k_download_and_run_otp(ar);
  2191. + if (ret) {
  2192. +- ath10k_err("failed to download firmware: %d\n", ret);
  2193. ++ ath10k_err(ar, "failed to run otp: %d\n", ret);
  2194. + return ret;
  2195. + }
  2196. +
  2197. +- return ret;
  2198. ++ ar->cal_mode = ATH10K_CAL_MODE_OTP;
  2199. ++
  2200. ++done:
  2201. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using calibration mode %s\n",
  2202. ++ ath10k_cal_mode_str(ar->cal_mode));
  2203. ++ return 0;
  2204. + }
  2205. +
  2206. + static int ath10k_init_uart(struct ath10k *ar)
  2207. +@@ -616,33 +812,33 @@ static int ath10k_init_uart(struct ath10
  2208. + */
  2209. + ret = ath10k_bmi_write32(ar, hi_serial_enable, 0);
  2210. + if (ret) {
  2211. +- ath10k_warn("could not disable UART prints (%d)\n", ret);
  2212. ++ ath10k_warn(ar, "could not disable UART prints (%d)\n", ret);
  2213. + return ret;
  2214. + }
  2215. +
  2216. + if (!uart_print)
  2217. + return 0;
  2218. +
  2219. +- ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, 7);
  2220. ++ ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, ar->hw_params.uart_pin);
  2221. + if (ret) {
  2222. +- ath10k_warn("could not enable UART prints (%d)\n", ret);
  2223. ++ ath10k_warn(ar, "could not enable UART prints (%d)\n", ret);
  2224. + return ret;
  2225. + }
  2226. +
  2227. + ret = ath10k_bmi_write32(ar, hi_serial_enable, 1);
  2228. + if (ret) {
  2229. +- ath10k_warn("could not enable UART prints (%d)\n", ret);
  2230. ++ ath10k_warn(ar, "could not enable UART prints (%d)\n", ret);
  2231. + return ret;
  2232. + }
  2233. +
  2234. + /* Set the UART baud rate to 19200. */
  2235. + ret = ath10k_bmi_write32(ar, hi_desired_baud_rate, 19200);
  2236. + if (ret) {
  2237. +- ath10k_warn("could not set the baud rate (%d)\n", ret);
  2238. ++ ath10k_warn(ar, "could not set the baud rate (%d)\n", ret);
  2239. + return ret;
  2240. + }
  2241. +
  2242. +- ath10k_info("UART prints enabled\n");
  2243. ++ ath10k_info(ar, "UART prints enabled\n");
  2244. + return 0;
  2245. + }
  2246. +
  2247. +@@ -659,14 +855,14 @@ static int ath10k_init_hw_params(struct
  2248. + }
  2249. +
  2250. + if (i == ARRAY_SIZE(ath10k_hw_params_list)) {
  2251. +- ath10k_err("Unsupported hardware version: 0x%x\n",
  2252. ++ ath10k_err(ar, "Unsupported hardware version: 0x%x\n",
  2253. + ar->target_version);
  2254. + return -EINVAL;
  2255. + }
  2256. +
  2257. + ar->hw_params = *hw_params;
  2258. +
  2259. +- ath10k_dbg(ATH10K_DBG_BOOT, "Hardware name %s version 0x%x\n",
  2260. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "Hardware name %s version 0x%x\n",
  2261. + ar->hw_params.name, ar->target_version);
  2262. +
  2263. + return 0;
  2264. +@@ -676,101 +872,124 @@ static void ath10k_core_restart(struct w
  2265. + {
  2266. + struct ath10k *ar = container_of(work, struct ath10k, restart_work);
  2267. +
  2268. ++ set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
  2269. ++
  2270. ++ /* Place a barrier to make sure the compiler doesn't reorder
  2271. ++ * CRASH_FLUSH and calling other functions.
  2272. ++ */
  2273. ++ barrier();
  2274. ++
  2275. ++ ieee80211_stop_queues(ar->hw);
  2276. ++ ath10k_drain_tx(ar);
  2277. ++ complete_all(&ar->scan.started);
  2278. ++ complete_all(&ar->scan.completed);
  2279. ++ complete_all(&ar->scan.on_channel);
  2280. ++ complete_all(&ar->offchan_tx_completed);
  2281. ++ complete_all(&ar->install_key_done);
  2282. ++ complete_all(&ar->vdev_setup_done);
  2283. ++ complete_all(&ar->thermal.wmi_sync);
  2284. ++ wake_up(&ar->htt.empty_tx_wq);
  2285. ++ wake_up(&ar->wmi.tx_credits_wq);
  2286. ++ wake_up(&ar->peer_mapping_wq);
  2287. ++
  2288. + mutex_lock(&ar->conf_mutex);
  2289. +
  2290. + switch (ar->state) {
  2291. + case ATH10K_STATE_ON:
  2292. + ar->state = ATH10K_STATE_RESTARTING;
  2293. +- ath10k_halt(ar);
  2294. ++ ath10k_hif_stop(ar);
  2295. ++ ath10k_scan_finish(ar);
  2296. + ieee80211_restart_hw(ar->hw);
  2297. + break;
  2298. + case ATH10K_STATE_OFF:
  2299. + /* this can happen if driver is being unloaded
  2300. + * or if the crash happens during FW probing */
  2301. +- ath10k_warn("cannot restart a device that hasn't been started\n");
  2302. ++ ath10k_warn(ar, "cannot restart a device that hasn't been started\n");
  2303. + break;
  2304. + case ATH10K_STATE_RESTARTING:
  2305. ++ /* hw restart might be requested from multiple places */
  2306. ++ break;
  2307. + case ATH10K_STATE_RESTARTED:
  2308. + ar->state = ATH10K_STATE_WEDGED;
  2309. + /* fall through */
  2310. + case ATH10K_STATE_WEDGED:
  2311. +- ath10k_warn("device is wedged, will not restart\n");
  2312. ++ ath10k_warn(ar, "device is wedged, will not restart\n");
  2313. ++ break;
  2314. ++ case ATH10K_STATE_UTF:
  2315. ++ ath10k_warn(ar, "firmware restart in UTF mode not supported\n");
  2316. + break;
  2317. + }
  2318. +
  2319. + mutex_unlock(&ar->conf_mutex);
  2320. + }
  2321. +
  2322. +-struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
  2323. +- const struct ath10k_hif_ops *hif_ops)
  2324. ++static int ath10k_core_init_firmware_features(struct ath10k *ar)
  2325. + {
  2326. +- struct ath10k *ar;
  2327. +-
  2328. +- ar = ath10k_mac_create();
  2329. +- if (!ar)
  2330. +- return NULL;
  2331. +-
  2332. +- ar->ath_common.priv = ar;
  2333. +- ar->ath_common.hw = ar->hw;
  2334. +-
  2335. +- ar->p2p = !!ath10k_p2p;
  2336. +- ar->dev = dev;
  2337. +-
  2338. +- ar->hif.priv = hif_priv;
  2339. +- ar->hif.ops = hif_ops;
  2340. +-
  2341. +- init_completion(&ar->scan.started);
  2342. +- init_completion(&ar->scan.completed);
  2343. +- init_completion(&ar->scan.on_channel);
  2344. +- init_completion(&ar->target_suspend);
  2345. +-
  2346. +- init_completion(&ar->install_key_done);
  2347. +- init_completion(&ar->vdev_setup_done);
  2348. +-
  2349. +- setup_timer(&ar->scan.timeout, ath10k_reset_scan, (unsigned long)ar);
  2350. +-
  2351. +- ar->workqueue = create_singlethread_workqueue("ath10k_wq");
  2352. +- if (!ar->workqueue)
  2353. +- goto err_wq;
  2354. +-
  2355. +- mutex_init(&ar->conf_mutex);
  2356. +- spin_lock_init(&ar->data_lock);
  2357. +-
  2358. +- INIT_LIST_HEAD(&ar->peers);
  2359. +- init_waitqueue_head(&ar->peer_mapping_wq);
  2360. +-
  2361. +- init_completion(&ar->offchan_tx_completed);
  2362. +- INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
  2363. +- skb_queue_head_init(&ar->offchan_tx_queue);
  2364. +-
  2365. +- INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work);
  2366. +- skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
  2367. +-
  2368. +- INIT_WORK(&ar->restart_work, ath10k_core_restart);
  2369. ++ if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features) &&
  2370. ++ !test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
  2371. ++ ath10k_err(ar, "feature bits corrupted: 10.2 feature requires 10.x feature to be set as well");
  2372. ++ return -EINVAL;
  2373. ++ }
  2374. +
  2375. +- return ar;
  2376. ++ if (ar->wmi.op_version >= ATH10K_FW_WMI_OP_VERSION_MAX) {
  2377. ++ ath10k_err(ar, "unsupported WMI OP version (max %d): %d\n",
  2378. ++ ATH10K_FW_WMI_OP_VERSION_MAX, ar->wmi.op_version);
  2379. ++ return -EINVAL;
  2380. ++ }
  2381. +
  2382. +-err_wq:
  2383. +- ath10k_mac_destroy(ar);
  2384. +- return NULL;
  2385. +-}
  2386. +-EXPORT_SYMBOL(ath10k_core_create);
  2387. ++ /* Backwards compatibility for firmwares without
  2388. ++ * ATH10K_FW_IE_WMI_OP_VERSION.
  2389. ++ */
  2390. ++ if (ar->wmi.op_version == ATH10K_FW_WMI_OP_VERSION_UNSET) {
  2391. ++ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
  2392. ++ if (test_bit(ATH10K_FW_FEATURE_WMI_10_2,
  2393. ++ ar->fw_features))
  2394. ++ ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_2;
  2395. ++ else
  2396. ++ ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
  2397. ++ } else {
  2398. ++ ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_MAIN;
  2399. ++ }
  2400. ++ }
  2401. +
  2402. +-void ath10k_core_destroy(struct ath10k *ar)
  2403. +-{
  2404. +- flush_workqueue(ar->workqueue);
  2405. +- destroy_workqueue(ar->workqueue);
  2406. ++ switch (ar->wmi.op_version) {
  2407. ++ case ATH10K_FW_WMI_OP_VERSION_MAIN:
  2408. ++ ar->max_num_peers = TARGET_NUM_PEERS;
  2409. ++ ar->max_num_stations = TARGET_NUM_STATIONS;
  2410. ++ ar->max_num_vdevs = TARGET_NUM_VDEVS;
  2411. ++ ar->htt.max_num_pending_tx = TARGET_NUM_MSDU_DESC;
  2412. ++ break;
  2413. ++ case ATH10K_FW_WMI_OP_VERSION_10_1:
  2414. ++ case ATH10K_FW_WMI_OP_VERSION_10_2:
  2415. ++ case ATH10K_FW_WMI_OP_VERSION_10_2_4:
  2416. ++ ar->max_num_peers = TARGET_10X_NUM_PEERS;
  2417. ++ ar->max_num_stations = TARGET_10X_NUM_STATIONS;
  2418. ++ ar->max_num_vdevs = TARGET_10X_NUM_VDEVS;
  2419. ++ ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
  2420. ++ break;
  2421. ++ case ATH10K_FW_WMI_OP_VERSION_TLV:
  2422. ++ ar->max_num_peers = TARGET_TLV_NUM_PEERS;
  2423. ++ ar->max_num_stations = TARGET_TLV_NUM_STATIONS;
  2424. ++ ar->max_num_vdevs = TARGET_TLV_NUM_VDEVS;
  2425. ++ ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC;
  2426. ++ break;
  2427. ++ case ATH10K_FW_WMI_OP_VERSION_UNSET:
  2428. ++ case ATH10K_FW_WMI_OP_VERSION_MAX:
  2429. ++ WARN_ON(1);
  2430. ++ return -EINVAL;
  2431. ++ }
  2432. +
  2433. +- ath10k_mac_destroy(ar);
  2434. ++ return 0;
  2435. + }
  2436. +-EXPORT_SYMBOL(ath10k_core_destroy);
  2437. +
  2438. +-int ath10k_core_start(struct ath10k *ar)
  2439. ++int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
  2440. + {
  2441. + int status;
  2442. +
  2443. + lockdep_assert_held(&ar->conf_mutex);
  2444. +
  2445. ++ clear_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
  2446. ++
  2447. + ath10k_bmi_start(ar);
  2448. +
  2449. + if (ath10k_init_configure_target(ar)) {
  2450. +@@ -778,7 +997,11 @@ int ath10k_core_start(struct ath10k *ar)
  2451. + goto err;
  2452. + }
  2453. +
  2454. +- status = ath10k_init_download_firmware(ar);
  2455. ++ status = ath10k_download_cal_data(ar);
  2456. ++ if (status)
  2457. ++ goto err;
  2458. ++
  2459. ++ status = ath10k_download_fw(ar, mode);
  2460. + if (status)
  2461. + goto err;
  2462. +
  2463. +@@ -791,7 +1014,7 @@ int ath10k_core_start(struct ath10k *ar)
  2464. +
  2465. + status = ath10k_htc_init(ar);
  2466. + if (status) {
  2467. +- ath10k_err("could not init HTC (%d)\n", status);
  2468. ++ ath10k_err(ar, "could not init HTC (%d)\n", status);
  2469. + goto err;
  2470. + }
  2471. +
  2472. +@@ -801,79 +1024,123 @@ int ath10k_core_start(struct ath10k *ar)
  2473. +
  2474. + status = ath10k_wmi_attach(ar);
  2475. + if (status) {
  2476. +- ath10k_err("WMI attach failed: %d\n", status);
  2477. ++ ath10k_err(ar, "WMI attach failed: %d\n", status);
  2478. + goto err;
  2479. + }
  2480. +
  2481. +- status = ath10k_hif_start(ar);
  2482. ++ status = ath10k_htt_init(ar);
  2483. ++ if (status) {
  2484. ++ ath10k_err(ar, "failed to init htt: %d\n", status);
  2485. ++ goto err_wmi_detach;
  2486. ++ }
  2487. ++
  2488. ++ status = ath10k_htt_tx_alloc(&ar->htt);
  2489. + if (status) {
  2490. +- ath10k_err("could not start HIF: %d\n", status);
  2491. ++ ath10k_err(ar, "failed to alloc htt tx: %d\n", status);
  2492. + goto err_wmi_detach;
  2493. + }
  2494. +
  2495. ++ status = ath10k_htt_rx_alloc(&ar->htt);
  2496. ++ if (status) {
  2497. ++ ath10k_err(ar, "failed to alloc htt rx: %d\n", status);
  2498. ++ goto err_htt_tx_detach;
  2499. ++ }
  2500. ++
  2501. ++ status = ath10k_hif_start(ar);
  2502. ++ if (status) {
  2503. ++ ath10k_err(ar, "could not start HIF: %d\n", status);
  2504. ++ goto err_htt_rx_detach;
  2505. ++ }
  2506. ++
  2507. + status = ath10k_htc_wait_target(&ar->htc);
  2508. + if (status) {
  2509. +- ath10k_err("failed to connect to HTC: %d\n", status);
  2510. ++ ath10k_err(ar, "failed to connect to HTC: %d\n", status);
  2511. + goto err_hif_stop;
  2512. + }
  2513. +
  2514. +- status = ath10k_htt_attach(ar);
  2515. ++ if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
  2516. ++ status = ath10k_htt_connect(&ar->htt);
  2517. ++ if (status) {
  2518. ++ ath10k_err(ar, "failed to connect htt (%d)\n", status);
  2519. ++ goto err_hif_stop;
  2520. ++ }
  2521. ++ }
  2522. ++
  2523. ++ status = ath10k_wmi_connect(ar);
  2524. + if (status) {
  2525. +- ath10k_err("could not attach htt (%d)\n", status);
  2526. ++ ath10k_err(ar, "could not connect wmi: %d\n", status);
  2527. + goto err_hif_stop;
  2528. + }
  2529. +
  2530. +- status = ath10k_init_connect_htc(ar);
  2531. +- if (status)
  2532. +- goto err_htt_detach;
  2533. ++ status = ath10k_htc_start(&ar->htc);
  2534. ++ if (status) {
  2535. ++ ath10k_err(ar, "failed to start htc: %d\n", status);
  2536. ++ goto err_hif_stop;
  2537. ++ }
  2538. +
  2539. +- ath10k_dbg(ATH10K_DBG_BOOT, "firmware %s booted\n",
  2540. ++ if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
  2541. ++ status = ath10k_wmi_wait_for_service_ready(ar);
  2542. ++ if (status <= 0) {
  2543. ++ ath10k_warn(ar, "wmi service ready event not received");
  2544. ++ status = -ETIMEDOUT;
  2545. ++ goto err_hif_stop;
  2546. ++ }
  2547. ++ }
  2548. ++
  2549. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "firmware %s booted\n",
  2550. + ar->hw->wiphy->fw_version);
  2551. +
  2552. + status = ath10k_wmi_cmd_init(ar);
  2553. + if (status) {
  2554. +- ath10k_err("could not send WMI init command (%d)\n", status);
  2555. +- goto err_disconnect_htc;
  2556. ++ ath10k_err(ar, "could not send WMI init command (%d)\n",
  2557. ++ status);
  2558. ++ goto err_hif_stop;
  2559. + }
  2560. +
  2561. + status = ath10k_wmi_wait_for_unified_ready(ar);
  2562. + if (status <= 0) {
  2563. +- ath10k_err("wmi unified ready event not received\n");
  2564. ++ ath10k_err(ar, "wmi unified ready event not received\n");
  2565. + status = -ETIMEDOUT;
  2566. +- goto err_disconnect_htc;
  2567. ++ goto err_hif_stop;
  2568. + }
  2569. +
  2570. +- status = ath10k_htt_attach_target(&ar->htt);
  2571. +- if (status)
  2572. +- goto err_disconnect_htc;
  2573. ++ /* If firmware indicates Full Rx Reorder support it must be used in a
  2574. ++ * slightly different manner. Let HTT code know.
  2575. ++ */
  2576. ++ ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER,
  2577. ++ ar->wmi.svc_map));
  2578. ++
  2579. ++ status = ath10k_htt_rx_ring_refill(ar);
  2580. ++ if (status) {
  2581. ++ ath10k_err(ar, "failed to refill htt rx ring: %d\n", status);
  2582. ++ goto err_hif_stop;
  2583. ++ }
  2584. ++
  2585. ++ /* we don't care about HTT in UTF mode */
  2586. ++ if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
  2587. ++ status = ath10k_htt_setup(&ar->htt);
  2588. ++ if (status) {
  2589. ++ ath10k_err(ar, "failed to setup htt: %d\n", status);
  2590. ++ goto err_hif_stop;
  2591. ++ }
  2592. ++ }
  2593. +
  2594. + status = ath10k_debug_start(ar);
  2595. + if (status)
  2596. +- goto err_disconnect_htc;
  2597. +-
  2598. +- ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
  2599. +- INIT_LIST_HEAD(&ar->arvifs);
  2600. ++ goto err_hif_stop;
  2601. +
  2602. +- if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
  2603. +- ath10k_info("%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d\n",
  2604. +- ar->hw_params.name,
  2605. +- ar->target_version,
  2606. +- ar->chip_id,
  2607. +- ar->hw->wiphy->fw_version,
  2608. +- ar->fw_api,
  2609. +- ar->htt.target_version_major,
  2610. +- ar->htt.target_version_minor);
  2611. ++ ar->free_vdev_map = (1LL << ar->max_num_vdevs) - 1;
  2612. +
  2613. +- __set_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags);
  2614. ++ INIT_LIST_HEAD(&ar->arvifs);
  2615. +
  2616. + return 0;
  2617. +
  2618. +-err_disconnect_htc:
  2619. +- ath10k_htc_stop(&ar->htc);
  2620. +-err_htt_detach:
  2621. +- ath10k_htt_detach(&ar->htt);
  2622. + err_hif_stop:
  2623. + ath10k_hif_stop(ar);
  2624. ++err_htt_rx_detach:
  2625. ++ ath10k_htt_rx_free(&ar->htt);
  2626. ++err_htt_tx_detach:
  2627. ++ ath10k_htt_tx_free(&ar->htt);
  2628. + err_wmi_detach:
  2629. + ath10k_wmi_detach(ar);
  2630. + err:
  2631. +@@ -889,14 +1156,14 @@ int ath10k_wait_for_suspend(struct ath10
  2632. +
  2633. + ret = ath10k_wmi_pdev_suspend_target(ar, suspend_opt);
  2634. + if (ret) {
  2635. +- ath10k_warn("could not suspend target (%d)\n", ret);
  2636. ++ ath10k_warn(ar, "could not suspend target (%d)\n", ret);
  2637. + return ret;
  2638. + }
  2639. +
  2640. + ret = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ);
  2641. +
  2642. + if (ret == 0) {
  2643. +- ath10k_warn("suspend timed out - target pause event never came\n");
  2644. ++ ath10k_warn(ar, "suspend timed out - target pause event never came\n");
  2645. + return -ETIMEDOUT;
  2646. + }
  2647. +
  2648. +@@ -908,12 +1175,14 @@ void ath10k_core_stop(struct ath10k *ar)
  2649. + lockdep_assert_held(&ar->conf_mutex);
  2650. +
  2651. + /* try to suspend target */
  2652. +- if (ar->state != ATH10K_STATE_RESTARTING)
  2653. ++ if (ar->state != ATH10K_STATE_RESTARTING &&
  2654. ++ ar->state != ATH10K_STATE_UTF)
  2655. + ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR);
  2656. +
  2657. + ath10k_debug_stop(ar);
  2658. +- ath10k_htc_stop(&ar->htc);
  2659. +- ath10k_htt_detach(&ar->htt);
  2660. ++ ath10k_hif_stop(ar);
  2661. ++ ath10k_htt_tx_free(&ar->htt);
  2662. ++ ath10k_htt_rx_free(&ar->htt);
  2663. + ath10k_wmi_detach(ar);
  2664. + }
  2665. + EXPORT_SYMBOL(ath10k_core_stop);
  2666. +@@ -929,16 +1198,15 @@ static int ath10k_core_probe_fw(struct a
  2667. +
  2668. + ret = ath10k_hif_power_up(ar);
  2669. + if (ret) {
  2670. +- ath10k_err("could not start pci hif (%d)\n", ret);
  2671. ++ ath10k_err(ar, "could not start pci hif (%d)\n", ret);
  2672. + return ret;
  2673. + }
  2674. +
  2675. + memset(&target_info, 0, sizeof(target_info));
  2676. + ret = ath10k_bmi_get_target_info(ar, &target_info);
  2677. + if (ret) {
  2678. +- ath10k_err("could not get target info (%d)\n", ret);
  2679. +- ath10k_hif_power_down(ar);
  2680. +- return ret;
  2681. ++ ath10k_err(ar, "could not get target info (%d)\n", ret);
  2682. ++ goto err_power_down;
  2683. + }
  2684. +
  2685. + ar->target_version = target_info.version;
  2686. +@@ -946,118 +1214,233 @@ static int ath10k_core_probe_fw(struct a
  2687. +
  2688. + ret = ath10k_init_hw_params(ar);
  2689. + if (ret) {
  2690. +- ath10k_err("could not get hw params (%d)\n", ret);
  2691. +- ath10k_hif_power_down(ar);
  2692. +- return ret;
  2693. ++ ath10k_err(ar, "could not get hw params (%d)\n", ret);
  2694. ++ goto err_power_down;
  2695. + }
  2696. +
  2697. + ret = ath10k_core_fetch_firmware_files(ar);
  2698. + if (ret) {
  2699. +- ath10k_err("could not fetch firmware files (%d)\n", ret);
  2700. +- ath10k_hif_power_down(ar);
  2701. +- return ret;
  2702. ++ ath10k_err(ar, "could not fetch firmware files (%d)\n", ret);
  2703. ++ goto err_power_down;
  2704. ++ }
  2705. ++
  2706. ++ ret = ath10k_core_init_firmware_features(ar);
  2707. ++ if (ret) {
  2708. ++ ath10k_err(ar, "fatal problem with firmware features: %d\n",
  2709. ++ ret);
  2710. ++ goto err_free_firmware_files;
  2711. + }
  2712. +
  2713. + mutex_lock(&ar->conf_mutex);
  2714. +
  2715. +- ret = ath10k_core_start(ar);
  2716. ++ ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL);
  2717. + if (ret) {
  2718. +- ath10k_err("could not init core (%d)\n", ret);
  2719. +- ath10k_core_free_firmware_files(ar);
  2720. +- ath10k_hif_power_down(ar);
  2721. +- mutex_unlock(&ar->conf_mutex);
  2722. +- return ret;
  2723. ++ ath10k_err(ar, "could not init core (%d)\n", ret);
  2724. ++ goto err_unlock;
  2725. + }
  2726. +
  2727. ++ ath10k_print_driver_info(ar);
  2728. + ath10k_core_stop(ar);
  2729. +
  2730. + mutex_unlock(&ar->conf_mutex);
  2731. +
  2732. + ath10k_hif_power_down(ar);
  2733. + return 0;
  2734. +-}
  2735. +-
  2736. +-static int ath10k_core_check_chip_id(struct ath10k *ar)
  2737. +-{
  2738. +- u32 hw_revision = MS(ar->chip_id, SOC_CHIP_ID_REV);
  2739. +-
  2740. +- ath10k_dbg(ATH10K_DBG_BOOT, "boot chip_id 0x%08x hw_revision 0x%x\n",
  2741. +- ar->chip_id, hw_revision);
  2742. +
  2743. +- /* Check that we are not using hw1.0 (some of them have same pci id
  2744. +- * as hw2.0) before doing anything else as ath10k crashes horribly
  2745. +- * due to missing hw1.0 workarounds. */
  2746. +- switch (hw_revision) {
  2747. +- case QCA988X_HW_1_0_CHIP_ID_REV:
  2748. +- ath10k_err("ERROR: qca988x hw1.0 is not supported\n");
  2749. +- return -EOPNOTSUPP;
  2750. ++err_unlock:
  2751. ++ mutex_unlock(&ar->conf_mutex);
  2752. +
  2753. +- case QCA988X_HW_2_0_CHIP_ID_REV:
  2754. +- /* known hardware revision, continue normally */
  2755. +- return 0;
  2756. ++err_free_firmware_files:
  2757. ++ ath10k_core_free_firmware_files(ar);
  2758. +
  2759. +- default:
  2760. +- ath10k_warn("Warning: hardware revision unknown (0x%x), expect problems\n",
  2761. +- ar->chip_id);
  2762. +- return 0;
  2763. +- }
  2764. ++err_power_down:
  2765. ++ ath10k_hif_power_down(ar);
  2766. +
  2767. +- return 0;
  2768. ++ return ret;
  2769. + }
  2770. +
  2771. +-int ath10k_core_register(struct ath10k *ar, u32 chip_id)
  2772. ++static void ath10k_core_register_work(struct work_struct *work)
  2773. + {
  2774. ++ struct ath10k *ar = container_of(work, struct ath10k, register_work);
  2775. + int status;
  2776. +
  2777. +- ar->chip_id = chip_id;
  2778. +-
  2779. +- status = ath10k_core_check_chip_id(ar);
  2780. +- if (status) {
  2781. +- ath10k_err("Unsupported chip id 0x%08x\n", ar->chip_id);
  2782. +- return status;
  2783. +- }
  2784. +-
  2785. + status = ath10k_core_probe_fw(ar);
  2786. + if (status) {
  2787. +- ath10k_err("could not probe fw (%d)\n", status);
  2788. +- return status;
  2789. ++ ath10k_err(ar, "could not probe fw (%d)\n", status);
  2790. ++ goto err;
  2791. + }
  2792. +
  2793. + status = ath10k_mac_register(ar);
  2794. + if (status) {
  2795. +- ath10k_err("could not register to mac80211 (%d)\n", status);
  2796. ++ ath10k_err(ar, "could not register to mac80211 (%d)\n", status);
  2797. + goto err_release_fw;
  2798. + }
  2799. +
  2800. +- status = ath10k_debug_create(ar);
  2801. ++ status = ath10k_debug_register(ar);
  2802. + if (status) {
  2803. +- ath10k_err("unable to initialize debugfs\n");
  2804. ++ ath10k_err(ar, "unable to initialize debugfs\n");
  2805. + goto err_unregister_mac;
  2806. + }
  2807. +
  2808. +- return 0;
  2809. ++ status = ath10k_spectral_create(ar);
  2810. ++ if (status) {
  2811. ++ ath10k_err(ar, "failed to initialize spectral\n");
  2812. ++ goto err_debug_destroy;
  2813. ++ }
  2814. +
  2815. ++ status = ath10k_thermal_register(ar);
  2816. ++ if (status) {
  2817. ++ ath10k_err(ar, "could not register thermal device: %d\n",
  2818. ++ status);
  2819. ++ goto err_spectral_destroy;
  2820. ++ }
  2821. ++
  2822. ++ set_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags);
  2823. ++ return;
  2824. ++
  2825. ++err_spectral_destroy:
  2826. ++ ath10k_spectral_destroy(ar);
  2827. ++err_debug_destroy:
  2828. ++ ath10k_debug_destroy(ar);
  2829. + err_unregister_mac:
  2830. + ath10k_mac_unregister(ar);
  2831. + err_release_fw:
  2832. + ath10k_core_free_firmware_files(ar);
  2833. +- return status;
  2834. ++err:
  2835. ++ /* TODO: It's probably a good idea to release device from the driver
  2836. ++ * but calling device_release_driver() here will cause a deadlock.
  2837. ++ */
  2838. ++ return;
  2839. ++}
  2840. ++
  2841. ++int ath10k_core_register(struct ath10k *ar, u32 chip_id)
  2842. ++{
  2843. ++ ar->chip_id = chip_id;
  2844. ++ queue_work(ar->workqueue, &ar->register_work);
  2845. ++
  2846. ++ return 0;
  2847. + }
  2848. + EXPORT_SYMBOL(ath10k_core_register);
  2849. +
  2850. + void ath10k_core_unregister(struct ath10k *ar)
  2851. + {
  2852. ++ cancel_work_sync(&ar->register_work);
  2853. ++
  2854. ++ if (!test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags))
  2855. ++ return;
  2856. ++
  2857. ++ ath10k_thermal_unregister(ar);
  2858. ++ /* Stop spectral before unregistering from mac80211 to remove the
  2859. ++ * relayfs debugfs file cleanly. Otherwise the parent debugfs tree
  2860. ++ * would already be free'd recursively, leading to a double free.
  2861. ++ */
  2862. ++ ath10k_spectral_destroy(ar);
  2863. ++
  2864. + /* We must unregister from mac80211 before we stop HTC and HIF.
  2865. + * Otherwise we will fail to submit commands to FW and mac80211 will be
  2866. + * unhappy about callback failures. */
  2867. + ath10k_mac_unregister(ar);
  2868. +
  2869. ++ ath10k_testmode_destroy(ar);
  2870. ++
  2871. + ath10k_core_free_firmware_files(ar);
  2872. +
  2873. +- ath10k_debug_destroy(ar);
  2874. ++ ath10k_debug_unregister(ar);
  2875. + }
  2876. + EXPORT_SYMBOL(ath10k_core_unregister);
  2877. +
  2878. ++struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
  2879. ++ enum ath10k_bus bus,
  2880. ++ enum ath10k_hw_rev hw_rev,
  2881. ++ const struct ath10k_hif_ops *hif_ops)
  2882. ++{
  2883. ++ struct ath10k *ar;
  2884. ++ int ret;
  2885. ++
  2886. ++ ar = ath10k_mac_create(priv_size);
  2887. ++ if (!ar)
  2888. ++ return NULL;
  2889. ++
  2890. ++ ar->ath_common.priv = ar;
  2891. ++ ar->ath_common.hw = ar->hw;
  2892. ++ ar->dev = dev;
  2893. ++ ar->hw_rev = hw_rev;
  2894. ++ ar->hif.ops = hif_ops;
  2895. ++ ar->hif.bus = bus;
  2896. ++
  2897. ++ switch (hw_rev) {
  2898. ++ case ATH10K_HW_QCA988X:
  2899. ++ ar->regs = &qca988x_regs;
  2900. ++ break;
  2901. ++ case ATH10K_HW_QCA6174:
  2902. ++ ar->regs = &qca6174_regs;
  2903. ++ break;
  2904. ++ default:
  2905. ++ ath10k_err(ar, "unsupported core hardware revision %d\n",
  2906. ++ hw_rev);
  2907. ++ ret = -ENOTSUPP;
  2908. ++ goto err_free_mac;
  2909. ++ }
  2910. ++
  2911. ++ init_completion(&ar->scan.started);
  2912. ++ init_completion(&ar->scan.completed);
  2913. ++ init_completion(&ar->scan.on_channel);
  2914. ++ init_completion(&ar->target_suspend);
  2915. ++
  2916. ++ init_completion(&ar->install_key_done);
  2917. ++ init_completion(&ar->vdev_setup_done);
  2918. ++ init_completion(&ar->thermal.wmi_sync);
  2919. ++
  2920. ++ INIT_DELAYED_WORK(&ar->scan.timeout, ath10k_scan_timeout_work);
  2921. ++
  2922. ++ ar->workqueue = create_singlethread_workqueue("ath10k_wq");
  2923. ++ if (!ar->workqueue)
  2924. ++ goto err_free_mac;
  2925. ++
  2926. ++ mutex_init(&ar->conf_mutex);
  2927. ++ spin_lock_init(&ar->data_lock);
  2928. ++
  2929. ++ INIT_LIST_HEAD(&ar->peers);
  2930. ++ init_waitqueue_head(&ar->peer_mapping_wq);
  2931. ++ init_waitqueue_head(&ar->htt.empty_tx_wq);
  2932. ++ init_waitqueue_head(&ar->wmi.tx_credits_wq);
  2933. ++
  2934. ++ init_completion(&ar->offchan_tx_completed);
  2935. ++ INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
  2936. ++ skb_queue_head_init(&ar->offchan_tx_queue);
  2937. ++
  2938. ++ INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work);
  2939. ++ skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
  2940. ++
  2941. ++ INIT_WORK(&ar->register_work, ath10k_core_register_work);
  2942. ++ INIT_WORK(&ar->restart_work, ath10k_core_restart);
  2943. ++
  2944. ++ ret = ath10k_debug_create(ar);
  2945. ++ if (ret)
  2946. ++ goto err_free_wq;
  2947. ++
  2948. ++ return ar;
  2949. ++
  2950. ++err_free_wq:
  2951. ++ destroy_workqueue(ar->workqueue);
  2952. ++
  2953. ++err_free_mac:
  2954. ++ ath10k_mac_destroy(ar);
  2955. ++
  2956. ++ return NULL;
  2957. ++}
  2958. ++EXPORT_SYMBOL(ath10k_core_create);
  2959. ++
  2960. ++void ath10k_core_destroy(struct ath10k *ar)
  2961. ++{
  2962. ++ flush_workqueue(ar->workqueue);
  2963. ++ destroy_workqueue(ar->workqueue);
  2964. ++
  2965. ++ ath10k_debug_destroy(ar);
  2966. ++ ath10k_mac_destroy(ar);
  2967. ++}
  2968. ++EXPORT_SYMBOL(ath10k_core_destroy);
  2969. ++
  2970. + MODULE_AUTHOR("Qualcomm Atheros");
  2971. + MODULE_DESCRIPTION("Core module for QCA988X PCIe devices.");
  2972. + MODULE_LICENSE("Dual BSD/GPL");
  2973. +--- a/drivers/net/wireless/ath/ath10k/core.h
  2974. ++++ b/drivers/net/wireless/ath/ath10k/core.h
  2975. +@@ -22,6 +22,8 @@
  2976. + #include <linux/if_ether.h>
  2977. + #include <linux/types.h>
  2978. + #include <linux/pci.h>
  2979. ++#include <linux/uuid.h>
  2980. ++#include <linux/time.h>
  2981. +
  2982. + #include "htt.h"
  2983. + #include "htc.h"
  2984. +@@ -31,6 +33,8 @@
  2985. + #include "../ath.h"
  2986. + #include "../regd.h"
  2987. + #include "../dfs_pattern_detector.h"
  2988. ++#include "spectral.h"
  2989. ++#include "thermal.h"
  2990. +
  2991. + #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
  2992. + #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
  2993. +@@ -60,12 +64,28 @@
  2994. +
  2995. + struct ath10k;
  2996. +
  2997. ++enum ath10k_bus {
  2998. ++ ATH10K_BUS_PCI,
  2999. ++};
  3000. ++
  3001. ++static inline const char *ath10k_bus_str(enum ath10k_bus bus)
  3002. ++{
  3003. ++ switch (bus) {
  3004. ++ case ATH10K_BUS_PCI:
  3005. ++ return "pci";
  3006. ++ }
  3007. ++
  3008. ++ return "unknown";
  3009. ++}
  3010. ++
  3011. + struct ath10k_skb_cb {
  3012. + dma_addr_t paddr;
  3013. ++ u8 eid;
  3014. + u8 vdev_id;
  3015. +
  3016. + struct {
  3017. + u8 tid;
  3018. ++ u16 freq;
  3019. + bool is_offchan;
  3020. + struct ath10k_htt_txbuf *txbuf;
  3021. + u32 txbuf_paddr;
  3022. +@@ -77,6 +97,11 @@ struct ath10k_skb_cb {
  3023. + } bcn;
  3024. + } __packed;
  3025. +
  3026. ++struct ath10k_skb_rxcb {
  3027. ++ dma_addr_t paddr;
  3028. ++ struct hlist_node hlist;
  3029. ++};
  3030. ++
  3031. + static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
  3032. + {
  3033. + BUILD_BUG_ON(sizeof(struct ath10k_skb_cb) >
  3034. +@@ -84,6 +109,15 @@ static inline struct ath10k_skb_cb *ATH1
  3035. + return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
  3036. + }
  3037. +
  3038. ++static inline struct ath10k_skb_rxcb *ATH10K_SKB_RXCB(struct sk_buff *skb)
  3039. ++{
  3040. ++ BUILD_BUG_ON(sizeof(struct ath10k_skb_rxcb) > sizeof(skb->cb));
  3041. ++ return (struct ath10k_skb_rxcb *)skb->cb;
  3042. ++}
  3043. ++
  3044. ++#define ATH10K_RXCB_SKB(rxcb) \
  3045. ++ container_of((void *)rxcb, struct sk_buff, cb)
  3046. ++
  3047. + static inline u32 host_interest_item_address(u32 item_offset)
  3048. + {
  3049. + return QCA988X_HOST_INTEREST_ADDRESS + item_offset;
  3050. +@@ -93,8 +127,6 @@ struct ath10k_bmi {
  3051. + bool done_sent;
  3052. + };
  3053. +
  3054. +-#define ATH10K_MAX_MEM_REQS 16
  3055. +-
  3056. + struct ath10k_mem_chunk {
  3057. + void *vaddr;
  3058. + dma_addr_t paddr;
  3059. +@@ -103,26 +135,52 @@ struct ath10k_mem_chunk {
  3060. + };
  3061. +
  3062. + struct ath10k_wmi {
  3063. ++ enum ath10k_fw_wmi_op_version op_version;
  3064. + enum ath10k_htc_ep_id eid;
  3065. + struct completion service_ready;
  3066. + struct completion unified_ready;
  3067. + wait_queue_head_t tx_credits_wq;
  3068. ++ DECLARE_BITMAP(svc_map, WMI_SERVICE_MAX);
  3069. + struct wmi_cmd_map *cmd;
  3070. + struct wmi_vdev_param_map *vdev_param;
  3071. + struct wmi_pdev_param_map *pdev_param;
  3072. ++ const struct wmi_ops *ops;
  3073. +
  3074. + u32 num_mem_chunks;
  3075. +- struct ath10k_mem_chunk mem_chunks[ATH10K_MAX_MEM_REQS];
  3076. ++ struct ath10k_mem_chunk mem_chunks[WMI_MAX_MEM_REQS];
  3077. + };
  3078. +
  3079. +-struct ath10k_peer_stat {
  3080. ++struct ath10k_fw_stats_peer {
  3081. ++ struct list_head list;
  3082. ++
  3083. + u8 peer_macaddr[ETH_ALEN];
  3084. + u32 peer_rssi;
  3085. + u32 peer_tx_rate;
  3086. + u32 peer_rx_rate; /* 10x only */
  3087. + };
  3088. +
  3089. +-struct ath10k_target_stats {
  3090. ++struct ath10k_fw_stats_vdev {
  3091. ++ struct list_head list;
  3092. ++
  3093. ++ u32 vdev_id;
  3094. ++ u32 beacon_snr;
  3095. ++ u32 data_snr;
  3096. ++ u32 num_tx_frames[4];
  3097. ++ u32 num_rx_frames;
  3098. ++ u32 num_tx_frames_retries[4];
  3099. ++ u32 num_tx_frames_failures[4];
  3100. ++ u32 num_rts_fail;
  3101. ++ u32 num_rts_success;
  3102. ++ u32 num_rx_err;
  3103. ++ u32 num_rx_discard;
  3104. ++ u32 num_tx_not_acked;
  3105. ++ u32 tx_rate_history[10];
  3106. ++ u32 beacon_rssi_history[10];
  3107. ++};
  3108. ++
  3109. ++struct ath10k_fw_stats_pdev {
  3110. ++ struct list_head list;
  3111. ++
  3112. + /* PDEV stats */
  3113. + s32 ch_noise_floor;
  3114. + u32 tx_frame_count;
  3115. +@@ -177,15 +235,12 @@ struct ath10k_target_stats {
  3116. + s32 phy_errs;
  3117. + s32 phy_err_drop;
  3118. + s32 mpdu_errs;
  3119. ++};
  3120. +
  3121. +- /* VDEV STATS */
  3122. +-
  3123. +- /* PEER STATS */
  3124. +- u8 peers;
  3125. +- struct ath10k_peer_stat peer_stat[TARGET_NUM_PEERS];
  3126. +-
  3127. +- /* TODO: Beacon filter stats */
  3128. +-
  3129. ++struct ath10k_fw_stats {
  3130. ++ struct list_head pdevs;
  3131. ++ struct list_head vdevs;
  3132. ++ struct list_head peers;
  3133. + };
  3134. +
  3135. + struct ath10k_dfs_stats {
  3136. +@@ -203,6 +258,8 @@ struct ath10k_peer {
  3137. + int vdev_id;
  3138. + u8 addr[ETH_ALEN];
  3139. + DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS);
  3140. ++
  3141. ++ /* protected by ar->data_lock */
  3142. + struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
  3143. + };
  3144. +
  3145. +@@ -216,10 +273,21 @@ struct ath10k_sta {
  3146. + u32 smps;
  3147. +
  3148. + struct work_struct update_wk;
  3149. ++
  3150. ++#ifdef CPTCFG_MAC80211_DEBUGFS
  3151. ++ /* protected by conf_mutex */
  3152. ++ bool aggr_mode;
  3153. ++#endif
  3154. + };
  3155. +
  3156. + #define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
  3157. +
  3158. ++enum ath10k_beacon_state {
  3159. ++ ATH10K_BEACON_SCHEDULED = 0,
  3160. ++ ATH10K_BEACON_SENDING,
  3161. ++ ATH10K_BEACON_SENT,
  3162. ++};
  3163. ++
  3164. + struct ath10k_vif {
  3165. + struct list_head list;
  3166. +
  3167. +@@ -230,20 +298,22 @@ struct ath10k_vif {
  3168. + u32 dtim_period;
  3169. + struct sk_buff *beacon;
  3170. + /* protected by data_lock */
  3171. +- bool beacon_sent;
  3172. ++ enum ath10k_beacon_state beacon_state;
  3173. ++ void *beacon_buf;
  3174. ++ dma_addr_t beacon_paddr;
  3175. +
  3176. + struct ath10k *ar;
  3177. + struct ieee80211_vif *vif;
  3178. +
  3179. + bool is_started;
  3180. + bool is_up;
  3181. ++ bool spectral_enabled;
  3182. ++ bool ps;
  3183. + u32 aid;
  3184. + u8 bssid[ETH_ALEN];
  3185. +
  3186. +- struct work_struct wep_key_work;
  3187. + struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1];
  3188. +- u8 def_wep_key_idx;
  3189. +- u8 def_wep_key_newidx;
  3190. ++ s8 def_wep_key_idx;
  3191. +
  3192. + u16 tx_seq_no;
  3193. +
  3194. +@@ -269,6 +339,8 @@ struct ath10k_vif {
  3195. + u8 force_sgi;
  3196. + bool use_cts_prot;
  3197. + int num_legacy_stations;
  3198. ++ int txpower;
  3199. ++ struct wmi_wmm_params_all_arg wmm_params;
  3200. + };
  3201. +
  3202. + struct ath10k_vif_iter {
  3203. +@@ -276,20 +348,38 @@ struct ath10k_vif_iter {
  3204. + struct ath10k_vif *arvif;
  3205. + };
  3206. +
  3207. ++/* used for crash-dump storage, protected by data-lock */
  3208. ++struct ath10k_fw_crash_data {
  3209. ++ bool crashed_since_read;
  3210. ++
  3211. ++ uuid_le uuid;
  3212. ++ struct timespec timestamp;
  3213. ++ __le32 registers[REG_DUMP_COUNT_QCA988X];
  3214. ++};
  3215. ++
  3216. + struct ath10k_debug {
  3217. + struct dentry *debugfs_phy;
  3218. +
  3219. +- struct ath10k_target_stats target_stats;
  3220. +- u32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
  3221. +-
  3222. +- struct completion event_stats_compl;
  3223. ++ struct ath10k_fw_stats fw_stats;
  3224. ++ struct completion fw_stats_complete;
  3225. ++ bool fw_stats_done;
  3226. +
  3227. + unsigned long htt_stats_mask;
  3228. + struct delayed_work htt_stats_dwork;
  3229. + struct ath10k_dfs_stats dfs_stats;
  3230. + struct ath_dfs_pool_stats dfs_pool_stats;
  3231. +
  3232. ++ /* protected by conf_mutex */
  3233. + u32 fw_dbglog_mask;
  3234. ++ u32 fw_dbglog_level;
  3235. ++ u32 pktlog_filter;
  3236. ++ u32 reg_addr;
  3237. ++ u32 nf_cal_period;
  3238. ++
  3239. ++ u8 htt_max_amsdu;
  3240. ++ u8 htt_max_ampdu;
  3241. ++
  3242. ++ struct ath10k_fw_crash_data *fw_crash_data;
  3243. + };
  3244. +
  3245. + enum ath10k_state {
  3246. +@@ -312,13 +402,24 @@ enum ath10k_state {
  3247. + * prevents completion timeouts and makes the driver more responsive to
  3248. + * userspace commands. This is also prevents recursive recovery. */
  3249. + ATH10K_STATE_WEDGED,
  3250. ++
  3251. ++ /* factory tests */
  3252. ++ ATH10K_STATE_UTF,
  3253. ++};
  3254. ++
  3255. ++enum ath10k_firmware_mode {
  3256. ++ /* the default mode, standard 802.11 functionality */
  3257. ++ ATH10K_FIRMWARE_MODE_NORMAL,
  3258. ++
  3259. ++ /* factory tests etc */
  3260. ++ ATH10K_FIRMWARE_MODE_UTF,
  3261. + };
  3262. +
  3263. + enum ath10k_fw_features {
  3264. + /* wmi_mgmt_rx_hdr contains extra RSSI information */
  3265. + ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX = 0,
  3266. +
  3267. +- /* firmware from 10X branch */
  3268. ++ /* Firmware from 10X branch. Deprecated, don't use in new code. */
  3269. + ATH10K_FW_FEATURE_WMI_10X = 1,
  3270. +
  3271. + /* firmware support tx frame management over WMI, otherwise it's HTT */
  3272. +@@ -327,6 +428,18 @@ enum ath10k_fw_features {
  3273. + /* Firmware does not support P2P */
  3274. + ATH10K_FW_FEATURE_NO_P2P = 3,
  3275. +
  3276. ++ /* Firmware 10.2 feature bit. The ATH10K_FW_FEATURE_WMI_10X feature
  3277. ++ * bit is required to be set as well. Deprecated, don't use in new
  3278. ++ * code.
  3279. ++ */
  3280. ++ ATH10K_FW_FEATURE_WMI_10_2 = 4,
  3281. ++
  3282. ++ /* Some firmware revisions lack proper multi-interface client powersave
  3283. ++ * implementation. Enabling PS could result in connection drops,
  3284. ++ * traffic stalls, etc.
  3285. ++ */
  3286. ++ ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT = 5,
  3287. ++
  3288. + /* keep last */
  3289. + ATH10K_FW_FEATURE_COUNT,
  3290. + };
  3291. +@@ -334,15 +447,64 @@ enum ath10k_fw_features {
  3292. + enum ath10k_dev_flags {
  3293. + /* Indicates that ath10k device is during CAC phase of DFS */
  3294. + ATH10K_CAC_RUNNING,
  3295. +- ATH10K_FLAG_FIRST_BOOT_DONE,
  3296. ++ ATH10K_FLAG_CORE_REGISTERED,
  3297. ++
  3298. ++ /* Device has crashed and needs to restart. This indicates any pending
  3299. ++ * waiters should immediately cancel instead of waiting for a time out.
  3300. ++ */
  3301. ++ ATH10K_FLAG_CRASH_FLUSH,
  3302. ++};
  3303. ++
  3304. ++enum ath10k_cal_mode {
  3305. ++ ATH10K_CAL_MODE_FILE,
  3306. ++ ATH10K_CAL_MODE_OTP,
  3307. ++ ATH10K_CAL_MODE_DT,
  3308. ++};
  3309. ++
  3310. ++static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode)
  3311. ++{
  3312. ++ switch (mode) {
  3313. ++ case ATH10K_CAL_MODE_FILE:
  3314. ++ return "file";
  3315. ++ case ATH10K_CAL_MODE_OTP:
  3316. ++ return "otp";
  3317. ++ case ATH10K_CAL_MODE_DT:
  3318. ++ return "dt";
  3319. ++ }
  3320. ++
  3321. ++ return "unknown";
  3322. ++}
  3323. ++
  3324. ++enum ath10k_scan_state {
  3325. ++ ATH10K_SCAN_IDLE,
  3326. ++ ATH10K_SCAN_STARTING,
  3327. ++ ATH10K_SCAN_RUNNING,
  3328. ++ ATH10K_SCAN_ABORTING,
  3329. + };
  3330. +
  3331. ++static inline const char *ath10k_scan_state_str(enum ath10k_scan_state state)
  3332. ++{
  3333. ++ switch (state) {
  3334. ++ case ATH10K_SCAN_IDLE:
  3335. ++ return "idle";
  3336. ++ case ATH10K_SCAN_STARTING:
  3337. ++ return "starting";
  3338. ++ case ATH10K_SCAN_RUNNING:
  3339. ++ return "running";
  3340. ++ case ATH10K_SCAN_ABORTING:
  3341. ++ return "aborting";
  3342. ++ }
  3343. ++
  3344. ++ return "unknown";
  3345. ++}
  3346. ++
  3347. + struct ath10k {
  3348. + struct ath_common ath_common;
  3349. + struct ieee80211_hw *hw;
  3350. + struct device *dev;
  3351. + u8 mac_addr[ETH_ALEN];
  3352. +
  3353. ++ enum ath10k_hw_rev hw_rev;
  3354. + u32 chip_id;
  3355. + u32 target_version;
  3356. + u8 fw_version_major;
  3357. +@@ -358,18 +520,16 @@ struct ath10k {
  3358. +
  3359. + DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
  3360. +
  3361. +- struct targetdef *targetdef;
  3362. +- struct hostdef *hostdef;
  3363. +-
  3364. + bool p2p;
  3365. +
  3366. + struct {
  3367. +- void *priv;
  3368. ++ enum ath10k_bus bus;
  3369. + const struct ath10k_hif_ops *ops;
  3370. + } hif;
  3371. +
  3372. + struct completion target_suspend;
  3373. +
  3374. ++ const struct ath10k_hw_regs *regs;
  3375. + struct ath10k_bmi bmi;
  3376. + struct ath10k_wmi wmi;
  3377. + struct ath10k_htc htc;
  3378. +@@ -379,12 +539,15 @@ struct ath10k {
  3379. + u32 id;
  3380. + const char *name;
  3381. + u32 patch_load_addr;
  3382. ++ int uart_pin;
  3383. +
  3384. + struct ath10k_hw_params_fw {
  3385. + const char *dir;
  3386. + const char *fw;
  3387. + const char *otp;
  3388. + const char *board;
  3389. ++ size_t board_size;
  3390. ++ size_t board_ext_size;
  3391. + } fw;
  3392. + } hw_params;
  3393. +
  3394. +@@ -400,16 +563,18 @@ struct ath10k {
  3395. + const void *firmware_data;
  3396. + size_t firmware_len;
  3397. +
  3398. ++ const struct firmware *cal_file;
  3399. ++
  3400. + int fw_api;
  3401. ++ enum ath10k_cal_mode cal_mode;
  3402. +
  3403. + struct {
  3404. + struct completion started;
  3405. + struct completion completed;
  3406. + struct completion on_channel;
  3407. +- struct timer_list timeout;
  3408. ++ struct delayed_work timeout;
  3409. ++ enum ath10k_scan_state state;
  3410. + bool is_roc;
  3411. +- bool in_progress;
  3412. +- bool aborting;
  3413. + int vdev_id;
  3414. + int roc_freq;
  3415. + } scan;
  3416. +@@ -427,8 +592,7 @@ struct ath10k {
  3417. + /* current operating channel definition */
  3418. + struct cfg80211_chan_def chandef;
  3419. +
  3420. +- int free_vdev_map;
  3421. +- bool promisc;
  3422. ++ unsigned long long free_vdev_map;
  3423. + bool monitor;
  3424. + int monitor_vdev_id;
  3425. + bool monitor_started;
  3426. +@@ -440,7 +604,12 @@ struct ath10k {
  3427. + bool radar_enabled;
  3428. + int num_started_vdevs;
  3429. +
  3430. +- struct wmi_pdev_set_wmm_params_arg wmm_params;
  3431. ++ /* Protected by conf-mutex */
  3432. ++ u8 supp_tx_chainmask;
  3433. ++ u8 supp_rx_chainmask;
  3434. ++ u8 cfg_tx_chainmask;
  3435. ++ u8 cfg_rx_chainmask;
  3436. ++
  3437. + struct completion install_key_done;
  3438. +
  3439. + struct completion vdev_setup_done;
  3440. +@@ -457,8 +626,13 @@ struct ath10k {
  3441. + struct list_head peers;
  3442. + wait_queue_head_t peer_mapping_wq;
  3443. +
  3444. +- /* number of created peers; protected by data_lock */
  3445. ++ /* protected by conf_mutex */
  3446. + int num_peers;
  3447. ++ int num_stations;
  3448. ++
  3449. ++ int max_num_peers;
  3450. ++ int max_num_stations;
  3451. ++ int max_num_vdevs;
  3452. +
  3453. + struct work_struct offchan_tx_work;
  3454. + struct sk_buff_head offchan_tx_queue;
  3455. +@@ -470,6 +644,7 @@ struct ath10k {
  3456. +
  3457. + enum ath10k_state state;
  3458. +
  3459. ++ struct work_struct register_work;
  3460. + struct work_struct restart_work;
  3461. +
  3462. + /* cycle count is reported twice for each visited channel during scan.
  3463. +@@ -483,13 +658,46 @@ struct ath10k {
  3464. + #ifdef CPTCFG_ATH10K_DEBUGFS
  3465. + struct ath10k_debug debug;
  3466. + #endif
  3467. ++
  3468. ++ struct {
  3469. ++ /* relay(fs) channel for spectral scan */
  3470. ++ struct rchan *rfs_chan_spec_scan;
  3471. ++
  3472. ++ /* spectral_mode and spec_config are protected by conf_mutex */
  3473. ++ enum ath10k_spectral_mode mode;
  3474. ++ struct ath10k_spec_scan config;
  3475. ++ } spectral;
  3476. ++
  3477. ++ struct {
  3478. ++ /* protected by conf_mutex */
  3479. ++ const struct firmware *utf;
  3480. ++ DECLARE_BITMAP(orig_fw_features, ATH10K_FW_FEATURE_COUNT);
  3481. ++ enum ath10k_fw_wmi_op_version orig_wmi_op_version;
  3482. ++
  3483. ++ /* protected by data_lock */
  3484. ++ bool utf_monitor;
  3485. ++ } testmode;
  3486. ++
  3487. ++ struct {
  3488. ++ /* protected by data_lock */
  3489. ++ u32 fw_crash_counter;
  3490. ++ u32 fw_warm_reset_counter;
  3491. ++ u32 fw_cold_reset_counter;
  3492. ++ } stats;
  3493. ++
  3494. ++ struct ath10k_thermal thermal;
  3495. ++
  3496. ++ /* must be last */
  3497. ++ u8 drv_priv[0] __aligned(sizeof(void *));
  3498. + };
  3499. +
  3500. +-struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
  3501. ++struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
  3502. ++ enum ath10k_bus bus,
  3503. ++ enum ath10k_hw_rev hw_rev,
  3504. + const struct ath10k_hif_ops *hif_ops);
  3505. + void ath10k_core_destroy(struct ath10k *ar);
  3506. +
  3507. +-int ath10k_core_start(struct ath10k *ar);
  3508. ++int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode);
  3509. + int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt);
  3510. + void ath10k_core_stop(struct ath10k *ar);
  3511. + int ath10k_core_register(struct ath10k *ar, u32 chip_id);
  3512. +--- a/drivers/net/wireless/ath/ath10k/debug.c
  3513. ++++ b/drivers/net/wireless/ath/ath10k/debug.c
  3514. +@@ -17,107 +17,176 @@
  3515. +
  3516. + #include <linux/module.h>
  3517. + #include <linux/debugfs.h>
  3518. ++#include <linux/vmalloc.h>
  3519. ++#include <linux/utsname.h>
  3520. +
  3521. + #include "core.h"
  3522. + #include "debug.h"
  3523. ++#include "hif.h"
  3524. ++#include "wmi-ops.h"
  3525. +
  3526. + /* ms */
  3527. + #define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
  3528. +
  3529. +-static int ath10k_printk(const char *level, const char *fmt, ...)
  3530. +-{
  3531. +- struct va_format vaf;
  3532. +- va_list args;
  3533. +- int rtn;
  3534. ++#define ATH10K_FW_CRASH_DUMP_VERSION 1
  3535. +
  3536. +- va_start(args, fmt);
  3537. ++/**
  3538. ++ * enum ath10k_fw_crash_dump_type - types of data in the dump file
  3539. ++ * @ATH10K_FW_CRASH_DUMP_REGDUMP: Register crash dump in binary format
  3540. ++ */
  3541. ++enum ath10k_fw_crash_dump_type {
  3542. ++ ATH10K_FW_CRASH_DUMP_REGISTERS = 0,
  3543. +
  3544. +- vaf.fmt = fmt;
  3545. +- vaf.va = &args;
  3546. ++ ATH10K_FW_CRASH_DUMP_MAX,
  3547. ++};
  3548. +
  3549. +- rtn = printk("%sath10k: %pV", level, &vaf);
  3550. ++struct ath10k_tlv_dump_data {
  3551. ++ /* see ath10k_fw_crash_dump_type above */
  3552. ++ __le32 type;
  3553. +
  3554. +- va_end(args);
  3555. ++ /* in bytes */
  3556. ++ __le32 tlv_len;
  3557. +
  3558. +- return rtn;
  3559. +-}
  3560. ++ /* pad to 32-bit boundaries as needed */
  3561. ++ u8 tlv_data[];
  3562. ++} __packed;
  3563. ++
  3564. ++struct ath10k_dump_file_data {
  3565. ++ /* dump file information */
  3566. ++
  3567. ++ /* "ATH10K-FW-DUMP" */
  3568. ++ char df_magic[16];
  3569. ++
  3570. ++ __le32 len;
  3571. ++
  3572. ++ /* file dump version */
  3573. ++ __le32 version;
  3574. ++
  3575. ++ /* some info we can get from ath10k struct that might help */
  3576. ++
  3577. ++ u8 uuid[16];
  3578. ++
  3579. ++ __le32 chip_id;
  3580. ++
  3581. ++ /* 0 for now, in place for later hardware */
  3582. ++ __le32 bus_type;
  3583. ++
  3584. ++ __le32 target_version;
  3585. ++ __le32 fw_version_major;
  3586. ++ __le32 fw_version_minor;
  3587. ++ __le32 fw_version_release;
  3588. ++ __le32 fw_version_build;
  3589. ++ __le32 phy_capability;
  3590. ++ __le32 hw_min_tx_power;
  3591. ++ __le32 hw_max_tx_power;
  3592. ++ __le32 ht_cap_info;
  3593. ++ __le32 vht_cap_info;
  3594. ++ __le32 num_rf_chains;
  3595. ++
  3596. ++ /* firmware version string */
  3597. ++ char fw_ver[ETHTOOL_FWVERS_LEN];
  3598. ++
  3599. ++ /* Kernel related information */
  3600. ++
  3601. ++ /* time-of-day stamp */
  3602. ++ __le64 tv_sec;
  3603. ++
  3604. ++ /* time-of-day stamp, nano-seconds */
  3605. ++ __le64 tv_nsec;
  3606. ++
  3607. ++ /* LINUX_VERSION_CODE */
  3608. ++ __le32 kernel_ver_code;
  3609. ++
  3610. ++ /* VERMAGIC_STRING */
  3611. ++ char kernel_ver[64];
  3612. +
  3613. +-int ath10k_info(const char *fmt, ...)
  3614. ++ /* room for growth w/out changing binary format */
  3615. ++ u8 unused[128];
  3616. ++
  3617. ++ /* struct ath10k_tlv_dump_data + more */
  3618. ++ u8 data[0];
  3619. ++} __packed;
  3620. ++
  3621. ++void ath10k_info(struct ath10k *ar, const char *fmt, ...)
  3622. + {
  3623. + struct va_format vaf = {
  3624. + .fmt = fmt,
  3625. + };
  3626. + va_list args;
  3627. +- int ret;
  3628. +
  3629. + va_start(args, fmt);
  3630. + vaf.va = &args;
  3631. +- ret = ath10k_printk(KERN_INFO, "%pV", &vaf);
  3632. +- trace_ath10k_log_info(&vaf);
  3633. ++ dev_info(ar->dev, "%pV", &vaf);
  3634. ++ trace_ath10k_log_info(ar, &vaf);
  3635. + va_end(args);
  3636. +-
  3637. +- return ret;
  3638. + }
  3639. + EXPORT_SYMBOL(ath10k_info);
  3640. +
  3641. +-int ath10k_err(const char *fmt, ...)
  3642. ++void ath10k_print_driver_info(struct ath10k *ar)
  3643. ++{
  3644. ++ ath10k_info(ar, "%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d wmi %d cal %s max_sta %d\n",
  3645. ++ ar->hw_params.name,
  3646. ++ ar->target_version,
  3647. ++ ar->chip_id,
  3648. ++ ar->hw->wiphy->fw_version,
  3649. ++ ar->fw_api,
  3650. ++ ar->htt.target_version_major,
  3651. ++ ar->htt.target_version_minor,
  3652. ++ ar->wmi.op_version,
  3653. ++ ath10k_cal_mode_str(ar->cal_mode),
  3654. ++ ar->max_num_stations);
  3655. ++ ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d testmode %d\n",
  3656. ++ config_enabled(CPTCFG_ATH10K_DEBUG),
  3657. ++ config_enabled(CPTCFG_ATH10K_DEBUGFS),
  3658. ++ config_enabled(CPTCFG_ATH10K_TRACING),
  3659. ++ config_enabled(CPTCFG_ATH10K_DFS_CERTIFIED),
  3660. ++ config_enabled(CPTCFG_NL80211_TESTMODE));
  3661. ++}
  3662. ++EXPORT_SYMBOL(ath10k_print_driver_info);
  3663. ++
  3664. ++void ath10k_err(struct ath10k *ar, const char *fmt, ...)
  3665. + {
  3666. + struct va_format vaf = {
  3667. + .fmt = fmt,
  3668. + };
  3669. + va_list args;
  3670. +- int ret;
  3671. +
  3672. + va_start(args, fmt);
  3673. + vaf.va = &args;
  3674. +- ret = ath10k_printk(KERN_ERR, "%pV", &vaf);
  3675. +- trace_ath10k_log_err(&vaf);
  3676. ++ dev_err(ar->dev, "%pV", &vaf);
  3677. ++ trace_ath10k_log_err(ar, &vaf);
  3678. + va_end(args);
  3679. +-
  3680. +- return ret;
  3681. + }
  3682. + EXPORT_SYMBOL(ath10k_err);
  3683. +
  3684. +-int ath10k_warn(const char *fmt, ...)
  3685. ++void ath10k_warn(struct ath10k *ar, const char *fmt, ...)
  3686. + {
  3687. + struct va_format vaf = {
  3688. + .fmt = fmt,
  3689. + };
  3690. + va_list args;
  3691. +- int ret = 0;
  3692. +
  3693. + va_start(args, fmt);
  3694. + vaf.va = &args;
  3695. +-
  3696. +- if (net_ratelimit())
  3697. +- ret = ath10k_printk(KERN_WARNING, "%pV", &vaf);
  3698. +-
  3699. +- trace_ath10k_log_warn(&vaf);
  3700. ++ dev_warn_ratelimited(ar->dev, "%pV", &vaf);
  3701. ++ trace_ath10k_log_warn(ar, &vaf);
  3702. +
  3703. + va_end(args);
  3704. +-
  3705. +- return ret;
  3706. + }
  3707. + EXPORT_SYMBOL(ath10k_warn);
  3708. +
  3709. + #ifdef CPTCFG_ATH10K_DEBUGFS
  3710. +
  3711. +-void ath10k_debug_read_service_map(struct ath10k *ar,
  3712. +- void *service_map,
  3713. +- size_t map_size)
  3714. +-{
  3715. +- memcpy(ar->debug.wmi_service_bitmap, service_map, map_size);
  3716. +-}
  3717. +-
  3718. + static ssize_t ath10k_read_wmi_services(struct file *file,
  3719. + char __user *user_buf,
  3720. + size_t count, loff_t *ppos)
  3721. + {
  3722. + struct ath10k *ar = file->private_data;
  3723. + char *buf;
  3724. +- unsigned int len = 0, buf_len = 1500;
  3725. +- const char *status;
  3726. ++ unsigned int len = 0, buf_len = 4096;
  3727. ++ const char *name;
  3728. + ssize_t ret_cnt;
  3729. ++ bool enabled;
  3730. + int i;
  3731. +
  3732. + buf = kzalloc(buf_len, GFP_KERNEL);
  3733. +@@ -129,16 +198,25 @@ static ssize_t ath10k_read_wmi_services(
  3734. + if (len > buf_len)
  3735. + len = buf_len;
  3736. +
  3737. +- for (i = 0; i < WMI_SERVICE_LAST; i++) {
  3738. +- if (WMI_SERVICE_IS_ENABLED(ar->debug.wmi_service_bitmap, i))
  3739. +- status = "enabled";
  3740. +- else
  3741. +- status = "disabled";
  3742. ++ spin_lock_bh(&ar->data_lock);
  3743. ++ for (i = 0; i < WMI_SERVICE_MAX; i++) {
  3744. ++ enabled = test_bit(i, ar->wmi.svc_map);
  3745. ++ name = wmi_service_name(i);
  3746. ++
  3747. ++ if (!name) {
  3748. ++ if (enabled)
  3749. ++ len += scnprintf(buf + len, buf_len - len,
  3750. ++ "%-40s %s (bit %d)\n",
  3751. ++ "unknown", "enabled", i);
  3752. ++
  3753. ++ continue;
  3754. ++ }
  3755. +
  3756. + len += scnprintf(buf + len, buf_len - len,
  3757. +- "0x%02x - %20s - %s\n",
  3758. +- i, wmi_service_name(i), status);
  3759. ++ "%-40s %s\n",
  3760. ++ name, enabled ? "enabled" : "-");
  3761. + }
  3762. ++ spin_unlock_bh(&ar->data_lock);
  3763. +
  3764. + ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
  3765. +
  3766. +@@ -155,169 +233,221 @@ static const struct file_operations fops
  3767. + .llseek = default_llseek,
  3768. + };
  3769. +
  3770. +-void ath10k_debug_read_target_stats(struct ath10k *ar,
  3771. +- struct wmi_stats_event *ev)
  3772. ++static void ath10k_debug_fw_stats_pdevs_free(struct list_head *head)
  3773. + {
  3774. +- u8 *tmp = ev->data;
  3775. +- struct ath10k_target_stats *stats;
  3776. +- int num_pdev_stats, num_vdev_stats, num_peer_stats;
  3777. +- struct wmi_pdev_stats_10x *ps;
  3778. +- int i;
  3779. ++ struct ath10k_fw_stats_pdev *i, *tmp;
  3780. +
  3781. ++ list_for_each_entry_safe(i, tmp, head, list) {
  3782. ++ list_del(&i->list);
  3783. ++ kfree(i);
  3784. ++ }
  3785. ++}
  3786. ++
  3787. ++static void ath10k_debug_fw_stats_vdevs_free(struct list_head *head)
  3788. ++{
  3789. ++ struct ath10k_fw_stats_vdev *i, *tmp;
  3790. ++
  3791. ++ list_for_each_entry_safe(i, tmp, head, list) {
  3792. ++ list_del(&i->list);
  3793. ++ kfree(i);
  3794. ++ }
  3795. ++}
  3796. ++
  3797. ++static void ath10k_debug_fw_stats_peers_free(struct list_head *head)
  3798. ++{
  3799. ++ struct ath10k_fw_stats_peer *i, *tmp;
  3800. ++
  3801. ++ list_for_each_entry_safe(i, tmp, head, list) {
  3802. ++ list_del(&i->list);
  3803. ++ kfree(i);
  3804. ++ }
  3805. ++}
  3806. ++
  3807. ++static void ath10k_debug_fw_stats_reset(struct ath10k *ar)
  3808. ++{
  3809. + spin_lock_bh(&ar->data_lock);
  3810. ++ ar->debug.fw_stats_done = false;
  3811. ++ ath10k_debug_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs);
  3812. ++ ath10k_debug_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs);
  3813. ++ ath10k_debug_fw_stats_peers_free(&ar->debug.fw_stats.peers);
  3814. ++ spin_unlock_bh(&ar->data_lock);
  3815. ++}
  3816. +
  3817. +- stats = &ar->debug.target_stats;
  3818. ++static size_t ath10k_debug_fw_stats_num_peers(struct list_head *head)
  3819. ++{
  3820. ++ struct ath10k_fw_stats_peer *i;
  3821. ++ size_t num = 0;
  3822. +
  3823. +- num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); /* 0 or 1 */
  3824. +- num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); /* 0 or max vdevs */
  3825. +- num_peer_stats = __le32_to_cpu(ev->num_peer_stats); /* 0 or max peers */
  3826. +-
  3827. +- if (num_pdev_stats) {
  3828. +- ps = (struct wmi_pdev_stats_10x *)tmp;
  3829. +-
  3830. +- stats->ch_noise_floor = __le32_to_cpu(ps->chan_nf);
  3831. +- stats->tx_frame_count = __le32_to_cpu(ps->tx_frame_count);
  3832. +- stats->rx_frame_count = __le32_to_cpu(ps->rx_frame_count);
  3833. +- stats->rx_clear_count = __le32_to_cpu(ps->rx_clear_count);
  3834. +- stats->cycle_count = __le32_to_cpu(ps->cycle_count);
  3835. +- stats->phy_err_count = __le32_to_cpu(ps->phy_err_count);
  3836. +- stats->chan_tx_power = __le32_to_cpu(ps->chan_tx_pwr);
  3837. +-
  3838. +- stats->comp_queued = __le32_to_cpu(ps->wal.tx.comp_queued);
  3839. +- stats->comp_delivered =
  3840. +- __le32_to_cpu(ps->wal.tx.comp_delivered);
  3841. +- stats->msdu_enqued = __le32_to_cpu(ps->wal.tx.msdu_enqued);
  3842. +- stats->mpdu_enqued = __le32_to_cpu(ps->wal.tx.mpdu_enqued);
  3843. +- stats->wmm_drop = __le32_to_cpu(ps->wal.tx.wmm_drop);
  3844. +- stats->local_enqued = __le32_to_cpu(ps->wal.tx.local_enqued);
  3845. +- stats->local_freed = __le32_to_cpu(ps->wal.tx.local_freed);
  3846. +- stats->hw_queued = __le32_to_cpu(ps->wal.tx.hw_queued);
  3847. +- stats->hw_reaped = __le32_to_cpu(ps->wal.tx.hw_reaped);
  3848. +- stats->underrun = __le32_to_cpu(ps->wal.tx.underrun);
  3849. +- stats->tx_abort = __le32_to_cpu(ps->wal.tx.tx_abort);
  3850. +- stats->mpdus_requed = __le32_to_cpu(ps->wal.tx.mpdus_requed);
  3851. +- stats->tx_ko = __le32_to_cpu(ps->wal.tx.tx_ko);
  3852. +- stats->data_rc = __le32_to_cpu(ps->wal.tx.data_rc);
  3853. +- stats->self_triggers = __le32_to_cpu(ps->wal.tx.self_triggers);
  3854. +- stats->sw_retry_failure =
  3855. +- __le32_to_cpu(ps->wal.tx.sw_retry_failure);
  3856. +- stats->illgl_rate_phy_err =
  3857. +- __le32_to_cpu(ps->wal.tx.illgl_rate_phy_err);
  3858. +- stats->pdev_cont_xretry =
  3859. +- __le32_to_cpu(ps->wal.tx.pdev_cont_xretry);
  3860. +- stats->pdev_tx_timeout =
  3861. +- __le32_to_cpu(ps->wal.tx.pdev_tx_timeout);
  3862. +- stats->pdev_resets = __le32_to_cpu(ps->wal.tx.pdev_resets);
  3863. +- stats->phy_underrun = __le32_to_cpu(ps->wal.tx.phy_underrun);
  3864. +- stats->txop_ovf = __le32_to_cpu(ps->wal.tx.txop_ovf);
  3865. +-
  3866. +- stats->mid_ppdu_route_change =
  3867. +- __le32_to_cpu(ps->wal.rx.mid_ppdu_route_change);
  3868. +- stats->status_rcvd = __le32_to_cpu(ps->wal.rx.status_rcvd);
  3869. +- stats->r0_frags = __le32_to_cpu(ps->wal.rx.r0_frags);
  3870. +- stats->r1_frags = __le32_to_cpu(ps->wal.rx.r1_frags);
  3871. +- stats->r2_frags = __le32_to_cpu(ps->wal.rx.r2_frags);
  3872. +- stats->r3_frags = __le32_to_cpu(ps->wal.rx.r3_frags);
  3873. +- stats->htt_msdus = __le32_to_cpu(ps->wal.rx.htt_msdus);
  3874. +- stats->htt_mpdus = __le32_to_cpu(ps->wal.rx.htt_mpdus);
  3875. +- stats->loc_msdus = __le32_to_cpu(ps->wal.rx.loc_msdus);
  3876. +- stats->loc_mpdus = __le32_to_cpu(ps->wal.rx.loc_mpdus);
  3877. +- stats->oversize_amsdu =
  3878. +- __le32_to_cpu(ps->wal.rx.oversize_amsdu);
  3879. +- stats->phy_errs = __le32_to_cpu(ps->wal.rx.phy_errs);
  3880. +- stats->phy_err_drop = __le32_to_cpu(ps->wal.rx.phy_err_drop);
  3881. +- stats->mpdu_errs = __le32_to_cpu(ps->wal.rx.mpdu_errs);
  3882. +-
  3883. +- if (test_bit(ATH10K_FW_FEATURE_WMI_10X,
  3884. +- ar->fw_features)) {
  3885. +- stats->ack_rx_bad = __le32_to_cpu(ps->ack_rx_bad);
  3886. +- stats->rts_bad = __le32_to_cpu(ps->rts_bad);
  3887. +- stats->rts_good = __le32_to_cpu(ps->rts_good);
  3888. +- stats->fcs_bad = __le32_to_cpu(ps->fcs_bad);
  3889. +- stats->no_beacons = __le32_to_cpu(ps->no_beacons);
  3890. +- stats->mib_int_count = __le32_to_cpu(ps->mib_int_count);
  3891. +- tmp += sizeof(struct wmi_pdev_stats_10x);
  3892. +- } else {
  3893. +- tmp += sizeof(struct wmi_pdev_stats_old);
  3894. +- }
  3895. ++ list_for_each_entry(i, head, list)
  3896. ++ ++num;
  3897. ++
  3898. ++ return num;
  3899. ++}
  3900. ++
  3901. ++static size_t ath10k_debug_fw_stats_num_vdevs(struct list_head *head)
  3902. ++{
  3903. ++ struct ath10k_fw_stats_vdev *i;
  3904. ++ size_t num = 0;
  3905. ++
  3906. ++ list_for_each_entry(i, head, list)
  3907. ++ ++num;
  3908. ++
  3909. ++ return num;
  3910. ++}
  3911. ++
  3912. ++void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
  3913. ++{
  3914. ++ struct ath10k_fw_stats stats = {};
  3915. ++ bool is_start, is_started, is_end;
  3916. ++ size_t num_peers;
  3917. ++ size_t num_vdevs;
  3918. ++ int ret;
  3919. ++
  3920. ++ INIT_LIST_HEAD(&stats.pdevs);
  3921. ++ INIT_LIST_HEAD(&stats.vdevs);
  3922. ++ INIT_LIST_HEAD(&stats.peers);
  3923. ++
  3924. ++ spin_lock_bh(&ar->data_lock);
  3925. ++ ret = ath10k_wmi_pull_fw_stats(ar, skb, &stats);
  3926. ++ if (ret) {
  3927. ++ ath10k_warn(ar, "failed to pull fw stats: %d\n", ret);
  3928. ++ goto unlock;
  3929. + }
  3930. +
  3931. +- /* 0 or max vdevs */
  3932. +- /* Currently firmware does not support VDEV stats */
  3933. +- if (num_vdev_stats) {
  3934. +- struct wmi_vdev_stats *vdev_stats;
  3935. +-
  3936. +- for (i = 0; i < num_vdev_stats; i++) {
  3937. +- vdev_stats = (struct wmi_vdev_stats *)tmp;
  3938. +- tmp += sizeof(struct wmi_vdev_stats);
  3939. +- }
  3940. ++ /* Stat data may exceed htc-wmi buffer limit. In such case firmware
  3941. ++ * splits the stats data and delivers it in a ping-pong fashion of
  3942. ++ * request cmd-update event.
  3943. ++ *
  3944. ++ * However there is no explicit end-of-data. Instead start-of-data is
  3945. ++ * used as an implicit one. This works as follows:
  3946. ++ * a) discard stat update events until one with pdev stats is
  3947. ++ * delivered - this skips session started at end of (b)
  3948. ++ * b) consume stat update events until another one with pdev stats is
  3949. ++ * delivered which is treated as end-of-data and is itself discarded
  3950. ++ */
  3951. ++
  3952. ++ if (ar->debug.fw_stats_done) {
  3953. ++ ath10k_warn(ar, "received unsolicited stats update event\n");
  3954. ++ goto free;
  3955. + }
  3956. +
  3957. +- if (num_peer_stats) {
  3958. +- struct wmi_peer_stats_10x *peer_stats;
  3959. +- struct ath10k_peer_stat *s;
  3960. +-
  3961. +- stats->peers = num_peer_stats;
  3962. +-
  3963. +- for (i = 0; i < num_peer_stats; i++) {
  3964. +- peer_stats = (struct wmi_peer_stats_10x *)tmp;
  3965. +- s = &stats->peer_stat[i];
  3966. +-
  3967. +- memcpy(s->peer_macaddr, &peer_stats->peer_macaddr.addr,
  3968. +- ETH_ALEN);
  3969. +- s->peer_rssi = __le32_to_cpu(peer_stats->peer_rssi);
  3970. +- s->peer_tx_rate =
  3971. +- __le32_to_cpu(peer_stats->peer_tx_rate);
  3972. +- if (test_bit(ATH10K_FW_FEATURE_WMI_10X,
  3973. +- ar->fw_features)) {
  3974. +- s->peer_rx_rate =
  3975. +- __le32_to_cpu(peer_stats->peer_rx_rate);
  3976. +- tmp += sizeof(struct wmi_peer_stats_10x);
  3977. +-
  3978. +- } else {
  3979. +- tmp += sizeof(struct wmi_peer_stats_old);
  3980. +- }
  3981. ++ num_peers = ath10k_debug_fw_stats_num_peers(&ar->debug.fw_stats.peers);
  3982. ++ num_vdevs = ath10k_debug_fw_stats_num_vdevs(&ar->debug.fw_stats.vdevs);
  3983. ++ is_start = (list_empty(&ar->debug.fw_stats.pdevs) &&
  3984. ++ !list_empty(&stats.pdevs));
  3985. ++ is_end = (!list_empty(&ar->debug.fw_stats.pdevs) &&
  3986. ++ !list_empty(&stats.pdevs));
  3987. ++
  3988. ++ if (is_start)
  3989. ++ list_splice_tail_init(&stats.pdevs, &ar->debug.fw_stats.pdevs);
  3990. ++
  3991. ++ if (is_end)
  3992. ++ ar->debug.fw_stats_done = true;
  3993. ++
  3994. ++ is_started = !list_empty(&ar->debug.fw_stats.pdevs);
  3995. ++
  3996. ++ if (is_started && !is_end) {
  3997. ++ if (num_peers >= ATH10K_MAX_NUM_PEER_IDS) {
  3998. ++ /* Although this is unlikely impose a sane limit to
  3999. ++ * prevent firmware from DoS-ing the host.
  4000. ++ */
  4001. ++ ath10k_warn(ar, "dropping fw peer stats\n");
  4002. ++ goto free;
  4003. + }
  4004. ++
  4005. ++ if (num_vdevs >= BITS_PER_LONG) {
  4006. ++ ath10k_warn(ar, "dropping fw vdev stats\n");
  4007. ++ goto free;
  4008. ++ }
  4009. ++
  4010. ++ list_splice_tail_init(&stats.peers, &ar->debug.fw_stats.peers);
  4011. ++ list_splice_tail_init(&stats.vdevs, &ar->debug.fw_stats.vdevs);
  4012. + }
  4013. +
  4014. ++ complete(&ar->debug.fw_stats_complete);
  4015. ++
  4016. ++free:
  4017. ++ /* In some cases lists have been spliced and cleared. Free up
  4018. ++ * resources if that is not the case.
  4019. ++ */
  4020. ++ ath10k_debug_fw_stats_pdevs_free(&stats.pdevs);
  4021. ++ ath10k_debug_fw_stats_vdevs_free(&stats.vdevs);
  4022. ++ ath10k_debug_fw_stats_peers_free(&stats.peers);
  4023. ++
  4024. ++unlock:
  4025. + spin_unlock_bh(&ar->data_lock);
  4026. +- complete(&ar->debug.event_stats_compl);
  4027. + }
  4028. +
  4029. +-static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
  4030. +- size_t count, loff_t *ppos)
  4031. ++static int ath10k_debug_fw_stats_request(struct ath10k *ar)
  4032. + {
  4033. +- struct ath10k *ar = file->private_data;
  4034. +- struct ath10k_target_stats *fw_stats;
  4035. +- char *buf = NULL;
  4036. +- unsigned int len = 0, buf_len = 8000;
  4037. +- ssize_t ret_cnt = 0;
  4038. +- long left;
  4039. +- int i;
  4040. ++ unsigned long timeout;
  4041. + int ret;
  4042. +
  4043. +- fw_stats = &ar->debug.target_stats;
  4044. ++ lockdep_assert_held(&ar->conf_mutex);
  4045. +
  4046. +- mutex_lock(&ar->conf_mutex);
  4047. ++ timeout = jiffies + msecs_to_jiffies(1*HZ);
  4048. +
  4049. +- if (ar->state != ATH10K_STATE_ON)
  4050. +- goto exit;
  4051. ++ ath10k_debug_fw_stats_reset(ar);
  4052. +
  4053. +- buf = kzalloc(buf_len, GFP_KERNEL);
  4054. +- if (!buf)
  4055. +- goto exit;
  4056. ++ for (;;) {
  4057. ++ if (time_after(jiffies, timeout))
  4058. ++ return -ETIMEDOUT;
  4059. +
  4060. +- ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT);
  4061. +- if (ret) {
  4062. +- ath10k_warn("could not request stats (%d)\n", ret);
  4063. +- goto exit;
  4064. ++ reinit_completion(&ar->debug.fw_stats_complete);
  4065. ++
  4066. ++ ret = ath10k_wmi_request_stats(ar,
  4067. ++ WMI_STAT_PDEV |
  4068. ++ WMI_STAT_VDEV |
  4069. ++ WMI_STAT_PEER);
  4070. ++ if (ret) {
  4071. ++ ath10k_warn(ar, "could not request stats (%d)\n", ret);
  4072. ++ return ret;
  4073. ++ }
  4074. ++
  4075. ++ ret = wait_for_completion_timeout(&ar->debug.fw_stats_complete,
  4076. ++ 1*HZ);
  4077. ++ if (ret == 0)
  4078. ++ return -ETIMEDOUT;
  4079. ++
  4080. ++ spin_lock_bh(&ar->data_lock);
  4081. ++ if (ar->debug.fw_stats_done) {
  4082. ++ spin_unlock_bh(&ar->data_lock);
  4083. ++ break;
  4084. ++ }
  4085. ++ spin_unlock_bh(&ar->data_lock);
  4086. + }
  4087. +
  4088. +- left = wait_for_completion_timeout(&ar->debug.event_stats_compl, 1*HZ);
  4089. +- if (left <= 0)
  4090. +- goto exit;
  4091. ++ return 0;
  4092. ++}
  4093. ++
  4094. ++/* FIXME: How to calculate the buffer size sanely? */
  4095. ++#define ATH10K_FW_STATS_BUF_SIZE (1024*1024)
  4096. ++
  4097. ++static void ath10k_fw_stats_fill(struct ath10k *ar,
  4098. ++ struct ath10k_fw_stats *fw_stats,
  4099. ++ char *buf)
  4100. ++{
  4101. ++ unsigned int len = 0;
  4102. ++ unsigned int buf_len = ATH10K_FW_STATS_BUF_SIZE;
  4103. ++ const struct ath10k_fw_stats_pdev *pdev;
  4104. ++ const struct ath10k_fw_stats_vdev *vdev;
  4105. ++ const struct ath10k_fw_stats_peer *peer;
  4106. ++ size_t num_peers;
  4107. ++ size_t num_vdevs;
  4108. ++ int i;
  4109. +
  4110. + spin_lock_bh(&ar->data_lock);
  4111. ++
  4112. ++ pdev = list_first_entry_or_null(&fw_stats->pdevs,
  4113. ++ struct ath10k_fw_stats_pdev, list);
  4114. ++ if (!pdev) {
  4115. ++ ath10k_warn(ar, "failed to get pdev stats\n");
  4116. ++ goto unlock;
  4117. ++ }
  4118. ++
  4119. ++ num_peers = ath10k_debug_fw_stats_num_peers(&fw_stats->peers);
  4120. ++ num_vdevs = ath10k_debug_fw_stats_num_vdevs(&fw_stats->vdevs);
  4121. ++
  4122. + len += scnprintf(buf + len, buf_len - len, "\n");
  4123. + len += scnprintf(buf + len, buf_len - len, "%30s\n",
  4124. + "ath10k PDEV stats");
  4125. +@@ -325,29 +455,29 @@ static ssize_t ath10k_read_fw_stats(stru
  4126. + "=================");
  4127. +
  4128. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4129. +- "Channel noise floor", fw_stats->ch_noise_floor);
  4130. ++ "Channel noise floor", pdev->ch_noise_floor);
  4131. + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
  4132. +- "Channel TX power", fw_stats->chan_tx_power);
  4133. ++ "Channel TX power", pdev->chan_tx_power);
  4134. + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
  4135. +- "TX frame count", fw_stats->tx_frame_count);
  4136. ++ "TX frame count", pdev->tx_frame_count);
  4137. + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
  4138. +- "RX frame count", fw_stats->rx_frame_count);
  4139. ++ "RX frame count", pdev->rx_frame_count);
  4140. + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
  4141. +- "RX clear count", fw_stats->rx_clear_count);
  4142. ++ "RX clear count", pdev->rx_clear_count);
  4143. + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
  4144. +- "Cycle count", fw_stats->cycle_count);
  4145. ++ "Cycle count", pdev->cycle_count);
  4146. + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
  4147. +- "PHY error count", fw_stats->phy_err_count);
  4148. ++ "PHY error count", pdev->phy_err_count);
  4149. + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
  4150. +- "RTS bad count", fw_stats->rts_bad);
  4151. ++ "RTS bad count", pdev->rts_bad);
  4152. + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
  4153. +- "RTS good count", fw_stats->rts_good);
  4154. ++ "RTS good count", pdev->rts_good);
  4155. + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
  4156. +- "FCS bad count", fw_stats->fcs_bad);
  4157. ++ "FCS bad count", pdev->fcs_bad);
  4158. + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
  4159. +- "No beacon count", fw_stats->no_beacons);
  4160. ++ "No beacon count", pdev->no_beacons);
  4161. + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
  4162. +- "MIB int count", fw_stats->mib_int_count);
  4163. ++ "MIB int count", pdev->mib_int_count);
  4164. +
  4165. + len += scnprintf(buf + len, buf_len - len, "\n");
  4166. + len += scnprintf(buf + len, buf_len - len, "%30s\n",
  4167. +@@ -356,51 +486,51 @@ static ssize_t ath10k_read_fw_stats(stru
  4168. + "=================");
  4169. +
  4170. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4171. +- "HTT cookies queued", fw_stats->comp_queued);
  4172. ++ "HTT cookies queued", pdev->comp_queued);
  4173. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4174. +- "HTT cookies disp.", fw_stats->comp_delivered);
  4175. ++ "HTT cookies disp.", pdev->comp_delivered);
  4176. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4177. +- "MSDU queued", fw_stats->msdu_enqued);
  4178. ++ "MSDU queued", pdev->msdu_enqued);
  4179. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4180. +- "MPDU queued", fw_stats->mpdu_enqued);
  4181. ++ "MPDU queued", pdev->mpdu_enqued);
  4182. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4183. +- "MSDUs dropped", fw_stats->wmm_drop);
  4184. ++ "MSDUs dropped", pdev->wmm_drop);
  4185. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4186. +- "Local enqued", fw_stats->local_enqued);
  4187. ++ "Local enqued", pdev->local_enqued);
  4188. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4189. +- "Local freed", fw_stats->local_freed);
  4190. ++ "Local freed", pdev->local_freed);
  4191. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4192. +- "HW queued", fw_stats->hw_queued);
  4193. ++ "HW queued", pdev->hw_queued);
  4194. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4195. +- "PPDUs reaped", fw_stats->hw_reaped);
  4196. ++ "PPDUs reaped", pdev->hw_reaped);
  4197. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4198. +- "Num underruns", fw_stats->underrun);
  4199. ++ "Num underruns", pdev->underrun);
  4200. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4201. +- "PPDUs cleaned", fw_stats->tx_abort);
  4202. ++ "PPDUs cleaned", pdev->tx_abort);
  4203. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4204. +- "MPDUs requed", fw_stats->mpdus_requed);
  4205. ++ "MPDUs requed", pdev->mpdus_requed);
  4206. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4207. +- "Excessive retries", fw_stats->tx_ko);
  4208. ++ "Excessive retries", pdev->tx_ko);
  4209. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4210. +- "HW rate", fw_stats->data_rc);
  4211. ++ "HW rate", pdev->data_rc);
  4212. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4213. +- "Sched self tiggers", fw_stats->self_triggers);
  4214. ++ "Sched self tiggers", pdev->self_triggers);
  4215. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4216. + "Dropped due to SW retries",
  4217. +- fw_stats->sw_retry_failure);
  4218. ++ pdev->sw_retry_failure);
  4219. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4220. + "Illegal rate phy errors",
  4221. +- fw_stats->illgl_rate_phy_err);
  4222. ++ pdev->illgl_rate_phy_err);
  4223. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4224. +- "Pdev continous xretry", fw_stats->pdev_cont_xretry);
  4225. ++ "Pdev continous xretry", pdev->pdev_cont_xretry);
  4226. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4227. +- "TX timeout", fw_stats->pdev_tx_timeout);
  4228. ++ "TX timeout", pdev->pdev_tx_timeout);
  4229. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4230. +- "PDEV resets", fw_stats->pdev_resets);
  4231. ++ "PDEV resets", pdev->pdev_resets);
  4232. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4233. +- "PHY underrun", fw_stats->phy_underrun);
  4234. ++ "PHY underrun", pdev->phy_underrun);
  4235. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4236. +- "MPDU is more than txop limit", fw_stats->txop_ovf);
  4237. ++ "MPDU is more than txop limit", pdev->txop_ovf);
  4238. +
  4239. + len += scnprintf(buf + len, buf_len - len, "\n");
  4240. + len += scnprintf(buf + len, buf_len - len, "%30s\n",
  4241. +@@ -410,84 +540,254 @@ static ssize_t ath10k_read_fw_stats(stru
  4242. +
  4243. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4244. + "Mid PPDU route change",
  4245. +- fw_stats->mid_ppdu_route_change);
  4246. ++ pdev->mid_ppdu_route_change);
  4247. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4248. +- "Tot. number of statuses", fw_stats->status_rcvd);
  4249. ++ "Tot. number of statuses", pdev->status_rcvd);
  4250. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4251. +- "Extra frags on rings 0", fw_stats->r0_frags);
  4252. ++ "Extra frags on rings 0", pdev->r0_frags);
  4253. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4254. +- "Extra frags on rings 1", fw_stats->r1_frags);
  4255. ++ "Extra frags on rings 1", pdev->r1_frags);
  4256. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4257. +- "Extra frags on rings 2", fw_stats->r2_frags);
  4258. ++ "Extra frags on rings 2", pdev->r2_frags);
  4259. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4260. +- "Extra frags on rings 3", fw_stats->r3_frags);
  4261. ++ "Extra frags on rings 3", pdev->r3_frags);
  4262. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4263. +- "MSDUs delivered to HTT", fw_stats->htt_msdus);
  4264. ++ "MSDUs delivered to HTT", pdev->htt_msdus);
  4265. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4266. +- "MPDUs delivered to HTT", fw_stats->htt_mpdus);
  4267. ++ "MPDUs delivered to HTT", pdev->htt_mpdus);
  4268. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4269. +- "MSDUs delivered to stack", fw_stats->loc_msdus);
  4270. ++ "MSDUs delivered to stack", pdev->loc_msdus);
  4271. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4272. +- "MPDUs delivered to stack", fw_stats->loc_mpdus);
  4273. ++ "MPDUs delivered to stack", pdev->loc_mpdus);
  4274. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4275. +- "Oversized AMSUs", fw_stats->oversize_amsdu);
  4276. ++ "Oversized AMSUs", pdev->oversize_amsdu);
  4277. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4278. +- "PHY errors", fw_stats->phy_errs);
  4279. ++ "PHY errors", pdev->phy_errs);
  4280. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4281. +- "PHY errors drops", fw_stats->phy_err_drop);
  4282. ++ "PHY errors drops", pdev->phy_err_drop);
  4283. + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
  4284. +- "MPDU errors (FCS, MIC, ENC)", fw_stats->mpdu_errs);
  4285. ++ "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
  4286. ++
  4287. ++ len += scnprintf(buf + len, buf_len - len, "\n");
  4288. ++ len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
  4289. ++ "ath10k VDEV stats", num_vdevs);
  4290. ++ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
  4291. ++ "=================");
  4292. ++
  4293. ++ list_for_each_entry(vdev, &fw_stats->vdevs, list) {
  4294. ++ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
  4295. ++ "vdev id", vdev->vdev_id);
  4296. ++ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
  4297. ++ "beacon snr", vdev->beacon_snr);
  4298. ++ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
  4299. ++ "data snr", vdev->data_snr);
  4300. ++ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
  4301. ++ "num rx frames", vdev->num_rx_frames);
  4302. ++ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
  4303. ++ "num rts fail", vdev->num_rts_fail);
  4304. ++ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
  4305. ++ "num rts success", vdev->num_rts_success);
  4306. ++ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
  4307. ++ "num rx err", vdev->num_rx_err);
  4308. ++ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
  4309. ++ "num rx discard", vdev->num_rx_discard);
  4310. ++ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
  4311. ++ "num tx not acked", vdev->num_tx_not_acked);
  4312. ++
  4313. ++ for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames); i++)
  4314. ++ len += scnprintf(buf + len, buf_len - len,
  4315. ++ "%25s [%02d] %u\n",
  4316. ++ "num tx frames", i,
  4317. ++ vdev->num_tx_frames[i]);
  4318. ++
  4319. ++ for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++)
  4320. ++ len += scnprintf(buf + len, buf_len - len,
  4321. ++ "%25s [%02d] %u\n",
  4322. ++ "num tx frames retries", i,
  4323. ++ vdev->num_tx_frames_retries[i]);
  4324. ++
  4325. ++ for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++)
  4326. ++ len += scnprintf(buf + len, buf_len - len,
  4327. ++ "%25s [%02d] %u\n",
  4328. ++ "num tx frames failures", i,
  4329. ++ vdev->num_tx_frames_failures[i]);
  4330. ++
  4331. ++ for (i = 0 ; i < ARRAY_SIZE(vdev->tx_rate_history); i++)
  4332. ++ len += scnprintf(buf + len, buf_len - len,
  4333. ++ "%25s [%02d] 0x%08x\n",
  4334. ++ "tx rate history", i,
  4335. ++ vdev->tx_rate_history[i]);
  4336. ++
  4337. ++ for (i = 0 ; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++)
  4338. ++ len += scnprintf(buf + len, buf_len - len,
  4339. ++ "%25s [%02d] %u\n",
  4340. ++ "beacon rssi history", i,
  4341. ++ vdev->beacon_rssi_history[i]);
  4342. ++
  4343. ++ len += scnprintf(buf + len, buf_len - len, "\n");
  4344. ++ }
  4345. +
  4346. + len += scnprintf(buf + len, buf_len - len, "\n");
  4347. +- len += scnprintf(buf + len, buf_len - len, "%30s (%d)\n",
  4348. +- "ath10k PEER stats", fw_stats->peers);
  4349. ++ len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
  4350. ++ "ath10k PEER stats", num_peers);
  4351. + len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
  4352. + "=================");
  4353. +
  4354. +- for (i = 0; i < fw_stats->peers; i++) {
  4355. ++ list_for_each_entry(peer, &fw_stats->peers, list) {
  4356. + len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
  4357. +- "Peer MAC address",
  4358. +- fw_stats->peer_stat[i].peer_macaddr);
  4359. ++ "Peer MAC address", peer->peer_macaddr);
  4360. + len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
  4361. +- "Peer RSSI", fw_stats->peer_stat[i].peer_rssi);
  4362. ++ "Peer RSSI", peer->peer_rssi);
  4363. + len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
  4364. +- "Peer TX rate",
  4365. +- fw_stats->peer_stat[i].peer_tx_rate);
  4366. ++ "Peer TX rate", peer->peer_tx_rate);
  4367. + len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
  4368. +- "Peer RX rate",
  4369. +- fw_stats->peer_stat[i].peer_rx_rate);
  4370. ++ "Peer RX rate", peer->peer_rx_rate);
  4371. + len += scnprintf(buf + len, buf_len - len, "\n");
  4372. + }
  4373. ++
  4374. ++unlock:
  4375. + spin_unlock_bh(&ar->data_lock);
  4376. +
  4377. +- if (len > buf_len)
  4378. +- len = buf_len;
  4379. ++ if (len >= buf_len)
  4380. ++ buf[len - 1] = 0;
  4381. ++ else
  4382. ++ buf[len] = 0;
  4383. ++}
  4384. +
  4385. +- ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
  4386. ++static int ath10k_fw_stats_open(struct inode *inode, struct file *file)
  4387. ++{
  4388. ++ struct ath10k *ar = inode->i_private;
  4389. ++ void *buf = NULL;
  4390. ++ int ret;
  4391. ++
  4392. ++ mutex_lock(&ar->conf_mutex);
  4393. ++
  4394. ++ if (ar->state != ATH10K_STATE_ON) {
  4395. ++ ret = -ENETDOWN;
  4396. ++ goto err_unlock;
  4397. ++ }
  4398. ++
  4399. ++ buf = vmalloc(ATH10K_FW_STATS_BUF_SIZE);
  4400. ++ if (!buf) {
  4401. ++ ret = -ENOMEM;
  4402. ++ goto err_unlock;
  4403. ++ }
  4404. ++
  4405. ++ ret = ath10k_debug_fw_stats_request(ar);
  4406. ++ if (ret) {
  4407. ++ ath10k_warn(ar, "failed to request fw stats: %d\n", ret);
  4408. ++ goto err_free;
  4409. ++ }
  4410. ++
  4411. ++ ath10k_fw_stats_fill(ar, &ar->debug.fw_stats, buf);
  4412. ++ file->private_data = buf;
  4413. +
  4414. +-exit:
  4415. + mutex_unlock(&ar->conf_mutex);
  4416. +- kfree(buf);
  4417. +- return ret_cnt;
  4418. ++ return 0;
  4419. ++
  4420. ++err_free:
  4421. ++ vfree(buf);
  4422. ++
  4423. ++err_unlock:
  4424. ++ mutex_unlock(&ar->conf_mutex);
  4425. ++ return ret;
  4426. ++}
  4427. ++
  4428. ++static int ath10k_fw_stats_release(struct inode *inode, struct file *file)
  4429. ++{
  4430. ++ vfree(file->private_data);
  4431. ++
  4432. ++ return 0;
  4433. ++}
  4434. ++
  4435. ++static ssize_t ath10k_fw_stats_read(struct file *file, char __user *user_buf,
  4436. ++ size_t count, loff_t *ppos)
  4437. ++{
  4438. ++ const char *buf = file->private_data;
  4439. ++ unsigned int len = strlen(buf);
  4440. ++
  4441. ++ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
  4442. + }
  4443. +
  4444. + static const struct file_operations fops_fw_stats = {
  4445. +- .read = ath10k_read_fw_stats,
  4446. ++ .open = ath10k_fw_stats_open,
  4447. ++ .release = ath10k_fw_stats_release,
  4448. ++ .read = ath10k_fw_stats_read,
  4449. ++ .owner = THIS_MODULE,
  4450. ++ .llseek = default_llseek,
  4451. ++};
  4452. ++
  4453. ++static ssize_t ath10k_debug_fw_reset_stats_read(struct file *file,
  4454. ++ char __user *user_buf,
  4455. ++ size_t count, loff_t *ppos)
  4456. ++{
  4457. ++ struct ath10k *ar = file->private_data;
  4458. ++ int ret, len, buf_len;
  4459. ++ char *buf;
  4460. ++
  4461. ++ buf_len = 500;
  4462. ++ buf = kmalloc(buf_len, GFP_KERNEL);
  4463. ++ if (!buf)
  4464. ++ return -ENOMEM;
  4465. ++
  4466. ++ spin_lock_bh(&ar->data_lock);
  4467. ++
  4468. ++ len = 0;
  4469. ++ len += scnprintf(buf + len, buf_len - len,
  4470. ++ "fw_crash_counter\t\t%d\n", ar->stats.fw_crash_counter);
  4471. ++ len += scnprintf(buf + len, buf_len - len,
  4472. ++ "fw_warm_reset_counter\t\t%d\n",
  4473. ++ ar->stats.fw_warm_reset_counter);
  4474. ++ len += scnprintf(buf + len, buf_len - len,
  4475. ++ "fw_cold_reset_counter\t\t%d\n",
  4476. ++ ar->stats.fw_cold_reset_counter);
  4477. ++
  4478. ++ spin_unlock_bh(&ar->data_lock);
  4479. ++
  4480. ++ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
  4481. ++
  4482. ++ kfree(buf);
  4483. ++
  4484. ++ return ret;
  4485. ++}
  4486. ++
  4487. ++static const struct file_operations fops_fw_reset_stats = {
  4488. + .open = simple_open,
  4489. ++ .read = ath10k_debug_fw_reset_stats_read,
  4490. + .owner = THIS_MODULE,
  4491. + .llseek = default_llseek,
  4492. + };
  4493. +
  4494. ++/* This is a clean assert crash in firmware. */
  4495. ++static int ath10k_debug_fw_assert(struct ath10k *ar)
  4496. ++{
  4497. ++ struct wmi_vdev_install_key_cmd *cmd;
  4498. ++ struct sk_buff *skb;
  4499. ++
  4500. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + 16);
  4501. ++ if (!skb)
  4502. ++ return -ENOMEM;
  4503. ++
  4504. ++ cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
  4505. ++ memset(cmd, 0, sizeof(*cmd));
  4506. ++
  4507. ++ /* big enough number so that firmware asserts */
  4508. ++ cmd->vdev_id = __cpu_to_le32(0x7ffe);
  4509. ++
  4510. ++ return ath10k_wmi_cmd_send(ar, skb,
  4511. ++ ar->wmi.cmd->vdev_install_key_cmdid);
  4512. ++}
  4513. ++
  4514. + static ssize_t ath10k_read_simulate_fw_crash(struct file *file,
  4515. + char __user *user_buf,
  4516. + size_t count, loff_t *ppos)
  4517. + {
  4518. +- const char buf[] = "To simulate firmware crash write one of the"
  4519. +- " keywords to this file:\n `soft` - this will send"
  4520. +- " WMI_FORCE_FW_HANG_ASSERT to firmware if FW"
  4521. +- " supports that command.\n `hard` - this will send"
  4522. +- " to firmware command with illegal parameters"
  4523. +- " causing firmware crash.\n";
  4524. ++ const char buf[] =
  4525. ++ "To simulate firmware crash write one of the keywords to this file:\n"
  4526. ++ "`soft` - this will send WMI_FORCE_FW_HANG_ASSERT to firmware if FW supports that command.\n"
  4527. ++ "`hard` - this will send to firmware command with illegal parameters causing firmware crash.\n"
  4528. ++ "`assert` - this will send special illegal parameter to firmware to cause assert failure and crash.\n"
  4529. ++ "`hw-restart` - this will simply queue hw restart without fw/hw actually crashing.\n";
  4530. +
  4531. + return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
  4532. + }
  4533. +@@ -527,19 +827,30 @@ static ssize_t ath10k_write_simulate_fw_
  4534. + }
  4535. +
  4536. + if (!strcmp(buf, "soft")) {
  4537. +- ath10k_info("simulating soft firmware crash\n");
  4538. ++ ath10k_info(ar, "simulating soft firmware crash\n");
  4539. + ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
  4540. + } else if (!strcmp(buf, "hard")) {
  4541. +- ath10k_info("simulating hard firmware crash\n");
  4542. +- ret = ath10k_wmi_vdev_set_param(ar, TARGET_NUM_VDEVS + 1,
  4543. +- ar->wmi.vdev_param->rts_threshold, 0);
  4544. ++ ath10k_info(ar, "simulating hard firmware crash\n");
  4545. ++ /* 0x7fff is vdev id, and it is always out of range for all
  4546. ++ * firmware variants in order to force a firmware crash.
  4547. ++ */
  4548. ++ ret = ath10k_wmi_vdev_set_param(ar, 0x7fff,
  4549. ++ ar->wmi.vdev_param->rts_threshold,
  4550. ++ 0);
  4551. ++ } else if (!strcmp(buf, "assert")) {
  4552. ++ ath10k_info(ar, "simulating firmware assert crash\n");
  4553. ++ ret = ath10k_debug_fw_assert(ar);
  4554. ++ } else if (!strcmp(buf, "hw-restart")) {
  4555. ++ ath10k_info(ar, "user requested hw restart\n");
  4556. ++ queue_work(ar->workqueue, &ar->restart_work);
  4557. ++ ret = 0;
  4558. + } else {
  4559. + ret = -EINVAL;
  4560. + goto exit;
  4561. + }
  4562. +
  4563. + if (ret) {
  4564. +- ath10k_warn("failed to simulate firmware crash: %d\n", ret);
  4565. ++ ath10k_warn(ar, "failed to simulate firmware crash: %d\n", ret);
  4566. + goto exit;
  4567. + }
  4568. +
  4569. +@@ -565,13 +876,375 @@ static ssize_t ath10k_read_chip_id(struc
  4570. + unsigned int len;
  4571. + char buf[50];
  4572. +
  4573. +- len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->chip_id);
  4574. ++ len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->chip_id);
  4575. ++
  4576. ++ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
  4577. ++}
  4578. ++
  4579. ++static const struct file_operations fops_chip_id = {
  4580. ++ .read = ath10k_read_chip_id,
  4581. ++ .open = simple_open,
  4582. ++ .owner = THIS_MODULE,
  4583. ++ .llseek = default_llseek,
  4584. ++};
  4585. ++
  4586. ++struct ath10k_fw_crash_data *
  4587. ++ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
  4588. ++{
  4589. ++ struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data;
  4590. ++
  4591. ++ lockdep_assert_held(&ar->data_lock);
  4592. ++
  4593. ++ crash_data->crashed_since_read = true;
  4594. ++ uuid_le_gen(&crash_data->uuid);
  4595. ++ getnstimeofday(&crash_data->timestamp);
  4596. ++
  4597. ++ return crash_data;
  4598. ++}
  4599. ++EXPORT_SYMBOL(ath10k_debug_get_new_fw_crash_data);
  4600. ++
  4601. ++static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar)
  4602. ++{
  4603. ++ struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data;
  4604. ++ struct ath10k_dump_file_data *dump_data;
  4605. ++ struct ath10k_tlv_dump_data *dump_tlv;
  4606. ++ int hdr_len = sizeof(*dump_data);
  4607. ++ unsigned int len, sofar = 0;
  4608. ++ unsigned char *buf;
  4609. ++
  4610. ++ len = hdr_len;
  4611. ++ len += sizeof(*dump_tlv) + sizeof(crash_data->registers);
  4612. ++
  4613. ++ sofar += hdr_len;
  4614. ++
  4615. ++ /* This is going to get big when we start dumping FW RAM and such,
  4616. ++ * so go ahead and use vmalloc.
  4617. ++ */
  4618. ++ buf = vzalloc(len);
  4619. ++ if (!buf)
  4620. ++ return NULL;
  4621. ++
  4622. ++ spin_lock_bh(&ar->data_lock);
  4623. ++
  4624. ++ if (!crash_data->crashed_since_read) {
  4625. ++ spin_unlock_bh(&ar->data_lock);
  4626. ++ vfree(buf);
  4627. ++ return NULL;
  4628. ++ }
  4629. ++
  4630. ++ dump_data = (struct ath10k_dump_file_data *)(buf);
  4631. ++ strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP",
  4632. ++ sizeof(dump_data->df_magic));
  4633. ++ dump_data->len = cpu_to_le32(len);
  4634. ++
  4635. ++ dump_data->version = cpu_to_le32(ATH10K_FW_CRASH_DUMP_VERSION);
  4636. ++
  4637. ++ memcpy(dump_data->uuid, &crash_data->uuid, sizeof(dump_data->uuid));
  4638. ++ dump_data->chip_id = cpu_to_le32(ar->chip_id);
  4639. ++ dump_data->bus_type = cpu_to_le32(0);
  4640. ++ dump_data->target_version = cpu_to_le32(ar->target_version);
  4641. ++ dump_data->fw_version_major = cpu_to_le32(ar->fw_version_major);
  4642. ++ dump_data->fw_version_minor = cpu_to_le32(ar->fw_version_minor);
  4643. ++ dump_data->fw_version_release = cpu_to_le32(ar->fw_version_release);
  4644. ++ dump_data->fw_version_build = cpu_to_le32(ar->fw_version_build);
  4645. ++ dump_data->phy_capability = cpu_to_le32(ar->phy_capability);
  4646. ++ dump_data->hw_min_tx_power = cpu_to_le32(ar->hw_min_tx_power);
  4647. ++ dump_data->hw_max_tx_power = cpu_to_le32(ar->hw_max_tx_power);
  4648. ++ dump_data->ht_cap_info = cpu_to_le32(ar->ht_cap_info);
  4649. ++ dump_data->vht_cap_info = cpu_to_le32(ar->vht_cap_info);
  4650. ++ dump_data->num_rf_chains = cpu_to_le32(ar->num_rf_chains);
  4651. ++
  4652. ++ strlcpy(dump_data->fw_ver, ar->hw->wiphy->fw_version,
  4653. ++ sizeof(dump_data->fw_ver));
  4654. ++
  4655. ++ dump_data->kernel_ver_code = 0;
  4656. ++ strlcpy(dump_data->kernel_ver, init_utsname()->release,
  4657. ++ sizeof(dump_data->kernel_ver));
  4658. ++
  4659. ++ dump_data->tv_sec = cpu_to_le64(crash_data->timestamp.tv_sec);
  4660. ++ dump_data->tv_nsec = cpu_to_le64(crash_data->timestamp.tv_nsec);
  4661. ++
  4662. ++ /* Gather crash-dump */
  4663. ++ dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
  4664. ++ dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_REGISTERS);
  4665. ++ dump_tlv->tlv_len = cpu_to_le32(sizeof(crash_data->registers));
  4666. ++ memcpy(dump_tlv->tlv_data, &crash_data->registers,
  4667. ++ sizeof(crash_data->registers));
  4668. ++ sofar += sizeof(*dump_tlv) + sizeof(crash_data->registers);
  4669. ++
  4670. ++ ar->debug.fw_crash_data->crashed_since_read = false;
  4671. ++
  4672. ++ spin_unlock_bh(&ar->data_lock);
  4673. ++
  4674. ++ return dump_data;
  4675. ++}
  4676. ++
  4677. ++static int ath10k_fw_crash_dump_open(struct inode *inode, struct file *file)
  4678. ++{
  4679. ++ struct ath10k *ar = inode->i_private;
  4680. ++ struct ath10k_dump_file_data *dump;
  4681. ++
  4682. ++ dump = ath10k_build_dump_file(ar);
  4683. ++ if (!dump)
  4684. ++ return -ENODATA;
  4685. ++
  4686. ++ file->private_data = dump;
  4687. ++
  4688. ++ return 0;
  4689. ++}
  4690. ++
  4691. ++static ssize_t ath10k_fw_crash_dump_read(struct file *file,
  4692. ++ char __user *user_buf,
  4693. ++ size_t count, loff_t *ppos)
  4694. ++{
  4695. ++ struct ath10k_dump_file_data *dump_file = file->private_data;
  4696. ++
  4697. ++ return simple_read_from_buffer(user_buf, count, ppos,
  4698. ++ dump_file,
  4699. ++ le32_to_cpu(dump_file->len));
  4700. ++}
  4701. ++
  4702. ++static int ath10k_fw_crash_dump_release(struct inode *inode,
  4703. ++ struct file *file)
  4704. ++{
  4705. ++ vfree(file->private_data);
  4706. ++
  4707. ++ return 0;
  4708. ++}
  4709. ++
  4710. ++static const struct file_operations fops_fw_crash_dump = {
  4711. ++ .open = ath10k_fw_crash_dump_open,
  4712. ++ .read = ath10k_fw_crash_dump_read,
  4713. ++ .release = ath10k_fw_crash_dump_release,
  4714. ++ .owner = THIS_MODULE,
  4715. ++ .llseek = default_llseek,
  4716. ++};
  4717. ++
  4718. ++static ssize_t ath10k_reg_addr_read(struct file *file,
  4719. ++ char __user *user_buf,
  4720. ++ size_t count, loff_t *ppos)
  4721. ++{
  4722. ++ struct ath10k *ar = file->private_data;
  4723. ++ u8 buf[32];
  4724. ++ unsigned int len = 0;
  4725. ++ u32 reg_addr;
  4726. ++
  4727. ++ mutex_lock(&ar->conf_mutex);
  4728. ++ reg_addr = ar->debug.reg_addr;
  4729. ++ mutex_unlock(&ar->conf_mutex);
  4730. ++
  4731. ++ len += scnprintf(buf + len, sizeof(buf) - len, "0x%x\n", reg_addr);
  4732. ++
  4733. ++ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
  4734. ++}
  4735. ++
  4736. ++static ssize_t ath10k_reg_addr_write(struct file *file,
  4737. ++ const char __user *user_buf,
  4738. ++ size_t count, loff_t *ppos)
  4739. ++{
  4740. ++ struct ath10k *ar = file->private_data;
  4741. ++ u32 reg_addr;
  4742. ++ int ret;
  4743. ++
  4744. ++ ret = kstrtou32_from_user(user_buf, count, 0, &reg_addr);
  4745. ++ if (ret)
  4746. ++ return ret;
  4747. ++
  4748. ++ if (!IS_ALIGNED(reg_addr, 4))
  4749. ++ return -EFAULT;
  4750. ++
  4751. ++ mutex_lock(&ar->conf_mutex);
  4752. ++ ar->debug.reg_addr = reg_addr;
  4753. ++ mutex_unlock(&ar->conf_mutex);
  4754. ++
  4755. ++ return count;
  4756. ++}
  4757. ++
  4758. ++static const struct file_operations fops_reg_addr = {
  4759. ++ .read = ath10k_reg_addr_read,
  4760. ++ .write = ath10k_reg_addr_write,
  4761. ++ .open = simple_open,
  4762. ++ .owner = THIS_MODULE,
  4763. ++ .llseek = default_llseek,
  4764. ++};
  4765. ++
  4766. ++static ssize_t ath10k_reg_value_read(struct file *file,
  4767. ++ char __user *user_buf,
  4768. ++ size_t count, loff_t *ppos)
  4769. ++{
  4770. ++ struct ath10k *ar = file->private_data;
  4771. ++ u8 buf[48];
  4772. ++ unsigned int len;
  4773. ++ u32 reg_addr, reg_val;
  4774. ++ int ret;
  4775. ++
  4776. ++ mutex_lock(&ar->conf_mutex);
  4777. ++
  4778. ++ if (ar->state != ATH10K_STATE_ON &&
  4779. ++ ar->state != ATH10K_STATE_UTF) {
  4780. ++ ret = -ENETDOWN;
  4781. ++ goto exit;
  4782. ++ }
  4783. ++
  4784. ++ reg_addr = ar->debug.reg_addr;
  4785. ++
  4786. ++ reg_val = ath10k_hif_read32(ar, reg_addr);
  4787. ++ len = scnprintf(buf, sizeof(buf), "0x%08x:0x%08x\n", reg_addr, reg_val);
  4788. ++
  4789. ++ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
  4790. ++
  4791. ++exit:
  4792. ++ mutex_unlock(&ar->conf_mutex);
  4793. ++
  4794. ++ return ret;
  4795. ++}
  4796. ++
  4797. ++static ssize_t ath10k_reg_value_write(struct file *file,
  4798. ++ const char __user *user_buf,
  4799. ++ size_t count, loff_t *ppos)
  4800. ++{
  4801. ++ struct ath10k *ar = file->private_data;
  4802. ++ u32 reg_addr, reg_val;
  4803. ++ int ret;
  4804. ++
  4805. ++ mutex_lock(&ar->conf_mutex);
  4806. ++
  4807. ++ if (ar->state != ATH10K_STATE_ON &&
  4808. ++ ar->state != ATH10K_STATE_UTF) {
  4809. ++ ret = -ENETDOWN;
  4810. ++ goto exit;
  4811. ++ }
  4812. ++
  4813. ++ reg_addr = ar->debug.reg_addr;
  4814. ++
  4815. ++ ret = kstrtou32_from_user(user_buf, count, 0, &reg_val);
  4816. ++ if (ret)
  4817. ++ goto exit;
  4818. ++
  4819. ++ ath10k_hif_write32(ar, reg_addr, reg_val);
  4820. ++
  4821. ++ ret = count;
  4822. ++
  4823. ++exit:
  4824. ++ mutex_unlock(&ar->conf_mutex);
  4825. ++
  4826. ++ return ret;
  4827. ++}
  4828. ++
  4829. ++static const struct file_operations fops_reg_value = {
  4830. ++ .read = ath10k_reg_value_read,
  4831. ++ .write = ath10k_reg_value_write,
  4832. ++ .open = simple_open,
  4833. ++ .owner = THIS_MODULE,
  4834. ++ .llseek = default_llseek,
  4835. ++};
  4836. ++
  4837. ++static ssize_t ath10k_mem_value_read(struct file *file,
  4838. ++ char __user *user_buf,
  4839. ++ size_t count, loff_t *ppos)
  4840. ++{
  4841. ++ struct ath10k *ar = file->private_data;
  4842. ++ u8 *buf;
  4843. ++ int ret;
  4844. ++
  4845. ++ if (*ppos < 0)
  4846. ++ return -EINVAL;
  4847. ++
  4848. ++ if (!count)
  4849. ++ return 0;
  4850. ++
  4851. ++ mutex_lock(&ar->conf_mutex);
  4852. ++
  4853. ++ buf = vmalloc(count);
  4854. ++ if (!buf) {
  4855. ++ ret = -ENOMEM;
  4856. ++ goto exit;
  4857. ++ }
  4858. ++
  4859. ++ if (ar->state != ATH10K_STATE_ON &&
  4860. ++ ar->state != ATH10K_STATE_UTF) {
  4861. ++ ret = -ENETDOWN;
  4862. ++ goto exit;
  4863. ++ }
  4864. ++
  4865. ++ ret = ath10k_hif_diag_read(ar, *ppos, buf, count);
  4866. ++ if (ret) {
  4867. ++ ath10k_warn(ar, "failed to read address 0x%08x via diagnose window fnrom debugfs: %d\n",
  4868. ++ (u32)(*ppos), ret);
  4869. ++ goto exit;
  4870. ++ }
  4871. ++
  4872. ++ ret = copy_to_user(user_buf, buf, count);
  4873. ++ if (ret) {
  4874. ++ ret = -EFAULT;
  4875. ++ goto exit;
  4876. ++ }
  4877. ++
  4878. ++ count -= ret;
  4879. ++ *ppos += count;
  4880. ++ ret = count;
  4881. ++
  4882. ++exit:
  4883. ++ vfree(buf);
  4884. ++ mutex_unlock(&ar->conf_mutex);
  4885. ++
  4886. ++ return ret;
  4887. ++}
  4888. ++
  4889. ++static ssize_t ath10k_mem_value_write(struct file *file,
  4890. ++ const char __user *user_buf,
  4891. ++ size_t count, loff_t *ppos)
  4892. ++{
  4893. ++ struct ath10k *ar = file->private_data;
  4894. ++ u8 *buf;
  4895. ++ int ret;
  4896. ++
  4897. ++ if (*ppos < 0)
  4898. ++ return -EINVAL;
  4899. ++
  4900. ++ if (!count)
  4901. ++ return 0;
  4902. ++
  4903. ++ mutex_lock(&ar->conf_mutex);
  4904. ++
  4905. ++ buf = vmalloc(count);
  4906. ++ if (!buf) {
  4907. ++ ret = -ENOMEM;
  4908. ++ goto exit;
  4909. ++ }
  4910. ++
  4911. ++ if (ar->state != ATH10K_STATE_ON &&
  4912. ++ ar->state != ATH10K_STATE_UTF) {
  4913. ++ ret = -ENETDOWN;
  4914. ++ goto exit;
  4915. ++ }
  4916. ++
  4917. ++ ret = copy_from_user(buf, user_buf, count);
  4918. ++ if (ret) {
  4919. ++ ret = -EFAULT;
  4920. ++ goto exit;
  4921. ++ }
  4922. ++
  4923. ++ ret = ath10k_hif_diag_write(ar, *ppos, buf, count);
  4924. ++ if (ret) {
  4925. ++ ath10k_warn(ar, "failed to write address 0x%08x via diagnose window from debugfs: %d\n",
  4926. ++ (u32)(*ppos), ret);
  4927. ++ goto exit;
  4928. ++ }
  4929. ++
  4930. ++ *ppos += count;
  4931. ++ ret = count;
  4932. +
  4933. +- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
  4934. ++exit:
  4935. ++ vfree(buf);
  4936. ++ mutex_unlock(&ar->conf_mutex);
  4937. ++
  4938. ++ return ret;
  4939. + }
  4940. +
  4941. +-static const struct file_operations fops_chip_id = {
  4942. +- .read = ath10k_read_chip_id,
  4943. ++static const struct file_operations fops_mem_value = {
  4944. ++ .read = ath10k_mem_value_read,
  4945. ++ .write = ath10k_mem_value_write,
  4946. + .open = simple_open,
  4947. + .owner = THIS_MODULE,
  4948. + .llseek = default_llseek,
  4949. +@@ -596,7 +1269,7 @@ static int ath10k_debug_htt_stats_req(st
  4950. + ret = ath10k_htt_h2t_stats_req(&ar->htt, ar->debug.htt_stats_mask,
  4951. + cookie);
  4952. + if (ret) {
  4953. +- ath10k_warn("failed to send htt stats request: %d\n", ret);
  4954. ++ ath10k_warn(ar, "failed to send htt stats request: %d\n", ret);
  4955. + return ret;
  4956. + }
  4957. +
  4958. +@@ -619,8 +1292,8 @@ static void ath10k_debug_htt_stats_dwork
  4959. + }
  4960. +
  4961. + static ssize_t ath10k_read_htt_stats_mask(struct file *file,
  4962. +- char __user *user_buf,
  4963. +- size_t count, loff_t *ppos)
  4964. ++ char __user *user_buf,
  4965. ++ size_t count, loff_t *ppos)
  4966. + {
  4967. + struct ath10k *ar = file->private_data;
  4968. + char buf[32];
  4969. +@@ -632,8 +1305,8 @@ static ssize_t ath10k_read_htt_stats_mas
  4970. + }
  4971. +
  4972. + static ssize_t ath10k_write_htt_stats_mask(struct file *file,
  4973. +- const char __user *user_buf,
  4974. +- size_t count, loff_t *ppos)
  4975. ++ const char __user *user_buf,
  4976. ++ size_t count, loff_t *ppos)
  4977. + {
  4978. + struct ath10k *ar = file->private_data;
  4979. + unsigned long mask;
  4980. +@@ -671,16 +1344,82 @@ static const struct file_operations fops
  4981. + .llseek = default_llseek,
  4982. + };
  4983. +
  4984. ++static ssize_t ath10k_read_htt_max_amsdu_ampdu(struct file *file,
  4985. ++ char __user *user_buf,
  4986. ++ size_t count, loff_t *ppos)
  4987. ++{
  4988. ++ struct ath10k *ar = file->private_data;
  4989. ++ char buf[64];
  4990. ++ u8 amsdu = 3, ampdu = 64;
  4991. ++ unsigned int len;
  4992. ++
  4993. ++ mutex_lock(&ar->conf_mutex);
  4994. ++
  4995. ++ if (ar->debug.htt_max_amsdu)
  4996. ++ amsdu = ar->debug.htt_max_amsdu;
  4997. ++
  4998. ++ if (ar->debug.htt_max_ampdu)
  4999. ++ ampdu = ar->debug.htt_max_ampdu;
  5000. ++
  5001. ++ mutex_unlock(&ar->conf_mutex);
  5002. ++
  5003. ++ len = scnprintf(buf, sizeof(buf), "%u %u\n", amsdu, ampdu);
  5004. ++
  5005. ++ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
  5006. ++}
  5007. ++
  5008. ++static ssize_t ath10k_write_htt_max_amsdu_ampdu(struct file *file,
  5009. ++ const char __user *user_buf,
  5010. ++ size_t count, loff_t *ppos)
  5011. ++{
  5012. ++ struct ath10k *ar = file->private_data;
  5013. ++ int res;
  5014. ++ char buf[64];
  5015. ++ unsigned int amsdu, ampdu;
  5016. ++
  5017. ++ simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
  5018. ++
  5019. ++ /* make sure that buf is null terminated */
  5020. ++ buf[sizeof(buf) - 1] = 0;
  5021. ++
  5022. ++ res = sscanf(buf, "%u %u", &amsdu, &ampdu);
  5023. ++
  5024. ++ if (res != 2)
  5025. ++ return -EINVAL;
  5026. ++
  5027. ++ mutex_lock(&ar->conf_mutex);
  5028. ++
  5029. ++ res = ath10k_htt_h2t_aggr_cfg_msg(&ar->htt, ampdu, amsdu);
  5030. ++ if (res)
  5031. ++ goto out;
  5032. ++
  5033. ++ res = count;
  5034. ++ ar->debug.htt_max_amsdu = amsdu;
  5035. ++ ar->debug.htt_max_ampdu = ampdu;
  5036. ++
  5037. ++out:
  5038. ++ mutex_unlock(&ar->conf_mutex);
  5039. ++ return res;
  5040. ++}
  5041. ++
  5042. ++static const struct file_operations fops_htt_max_amsdu_ampdu = {
  5043. ++ .read = ath10k_read_htt_max_amsdu_ampdu,
  5044. ++ .write = ath10k_write_htt_max_amsdu_ampdu,
  5045. ++ .open = simple_open,
  5046. ++ .owner = THIS_MODULE,
  5047. ++ .llseek = default_llseek,
  5048. ++};
  5049. ++
  5050. + static ssize_t ath10k_read_fw_dbglog(struct file *file,
  5051. +- char __user *user_buf,
  5052. +- size_t count, loff_t *ppos)
  5053. ++ char __user *user_buf,
  5054. ++ size_t count, loff_t *ppos)
  5055. + {
  5056. + struct ath10k *ar = file->private_data;
  5057. + unsigned int len;
  5058. +- char buf[32];
  5059. ++ char buf[64];
  5060. +
  5061. +- len = scnprintf(buf, sizeof(buf), "0x%08x\n",
  5062. +- ar->debug.fw_dbglog_mask);
  5063. ++ len = scnprintf(buf, sizeof(buf), "0x%08x %u\n",
  5064. ++ ar->debug.fw_dbglog_mask, ar->debug.fw_dbglog_level);
  5065. +
  5066. + return simple_read_from_buffer(user_buf, count, ppos, buf, len);
  5067. + }
  5068. +@@ -690,21 +1429,34 @@ static ssize_t ath10k_write_fw_dbglog(st
  5069. + size_t count, loff_t *ppos)
  5070. + {
  5071. + struct ath10k *ar = file->private_data;
  5072. +- unsigned long mask;
  5073. + int ret;
  5074. ++ char buf[64];
  5075. ++ unsigned int log_level, mask;
  5076. +
  5077. +- ret = kstrtoul_from_user(user_buf, count, 0, &mask);
  5078. +- if (ret)
  5079. +- return ret;
  5080. ++ simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
  5081. ++
  5082. ++ /* make sure that buf is null terminated */
  5083. ++ buf[sizeof(buf) - 1] = 0;
  5084. ++
  5085. ++ ret = sscanf(buf, "%x %u", &mask, &log_level);
  5086. ++
  5087. ++ if (!ret)
  5088. ++ return -EINVAL;
  5089. ++
  5090. ++ if (ret == 1)
  5091. ++ /* default if user did not specify */
  5092. ++ log_level = ATH10K_DBGLOG_LEVEL_WARN;
  5093. +
  5094. + mutex_lock(&ar->conf_mutex);
  5095. +
  5096. + ar->debug.fw_dbglog_mask = mask;
  5097. ++ ar->debug.fw_dbglog_level = log_level;
  5098. +
  5099. + if (ar->state == ATH10K_STATE_ON) {
  5100. +- ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask);
  5101. ++ ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask,
  5102. ++ ar->debug.fw_dbglog_level);
  5103. + if (ret) {
  5104. +- ath10k_warn("dbglog cfg failed from debugfs: %d\n",
  5105. ++ ath10k_warn(ar, "dbglog cfg failed from debugfs: %d\n",
  5106. + ret);
  5107. + goto exit;
  5108. + }
  5109. +@@ -718,6 +1470,166 @@ exit:
  5110. + return ret;
  5111. + }
  5112. +
  5113. ++/* TODO: Would be nice to always support ethtool stats, would need to
  5114. ++ * move the stats storage out of ath10k_debug, or always have ath10k_debug
  5115. ++ * struct available..
  5116. ++ */
  5117. ++
  5118. ++/* This generally cooresponds to the debugfs fw_stats file */
  5119. ++static const char ath10k_gstrings_stats[][ETH_GSTRING_LEN] = {
  5120. ++ "tx_pkts_nic",
  5121. ++ "tx_bytes_nic",
  5122. ++ "rx_pkts_nic",
  5123. ++ "rx_bytes_nic",
  5124. ++ "d_noise_floor",
  5125. ++ "d_cycle_count",
  5126. ++ "d_phy_error",
  5127. ++ "d_rts_bad",
  5128. ++ "d_rts_good",
  5129. ++ "d_tx_power", /* in .5 dbM I think */
  5130. ++ "d_rx_crc_err", /* fcs_bad */
  5131. ++ "d_no_beacon",
  5132. ++ "d_tx_mpdus_queued",
  5133. ++ "d_tx_msdu_queued",
  5134. ++ "d_tx_msdu_dropped",
  5135. ++ "d_local_enqued",
  5136. ++ "d_local_freed",
  5137. ++ "d_tx_ppdu_hw_queued",
  5138. ++ "d_tx_ppdu_reaped",
  5139. ++ "d_tx_fifo_underrun",
  5140. ++ "d_tx_ppdu_abort",
  5141. ++ "d_tx_mpdu_requed",
  5142. ++ "d_tx_excessive_retries",
  5143. ++ "d_tx_hw_rate",
  5144. ++ "d_tx_dropped_sw_retries",
  5145. ++ "d_tx_illegal_rate",
  5146. ++ "d_tx_continuous_xretries",
  5147. ++ "d_tx_timeout",
  5148. ++ "d_tx_mpdu_txop_limit",
  5149. ++ "d_pdev_resets",
  5150. ++ "d_rx_mid_ppdu_route_change",
  5151. ++ "d_rx_status",
  5152. ++ "d_rx_extra_frags_ring0",
  5153. ++ "d_rx_extra_frags_ring1",
  5154. ++ "d_rx_extra_frags_ring2",
  5155. ++ "d_rx_extra_frags_ring3",
  5156. ++ "d_rx_msdu_htt",
  5157. ++ "d_rx_mpdu_htt",
  5158. ++ "d_rx_msdu_stack",
  5159. ++ "d_rx_mpdu_stack",
  5160. ++ "d_rx_phy_err",
  5161. ++ "d_rx_phy_err_drops",
  5162. ++ "d_rx_mpdu_errors", /* FCS, MIC, ENC */
  5163. ++ "d_fw_crash_count",
  5164. ++ "d_fw_warm_reset_count",
  5165. ++ "d_fw_cold_reset_count",
  5166. ++};
  5167. ++
  5168. ++#define ATH10K_SSTATS_LEN ARRAY_SIZE(ath10k_gstrings_stats)
  5169. ++
  5170. ++void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
  5171. ++ struct ieee80211_vif *vif,
  5172. ++ u32 sset, u8 *data)
  5173. ++{
  5174. ++ if (sset == ETH_SS_STATS)
  5175. ++ memcpy(data, *ath10k_gstrings_stats,
  5176. ++ sizeof(ath10k_gstrings_stats));
  5177. ++}
  5178. ++
  5179. ++int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw,
  5180. ++ struct ieee80211_vif *vif, int sset)
  5181. ++{
  5182. ++ if (sset == ETH_SS_STATS)
  5183. ++ return ATH10K_SSTATS_LEN;
  5184. ++
  5185. ++ return 0;
  5186. ++}
  5187. ++
  5188. ++void ath10k_debug_get_et_stats(struct ieee80211_hw *hw,
  5189. ++ struct ieee80211_vif *vif,
  5190. ++ struct ethtool_stats *stats, u64 *data)
  5191. ++{
  5192. ++ struct ath10k *ar = hw->priv;
  5193. ++ static const struct ath10k_fw_stats_pdev zero_stats = {};
  5194. ++ const struct ath10k_fw_stats_pdev *pdev_stats;
  5195. ++ int i = 0, ret;
  5196. ++
  5197. ++ mutex_lock(&ar->conf_mutex);
  5198. ++
  5199. ++ if (ar->state == ATH10K_STATE_ON) {
  5200. ++ ret = ath10k_debug_fw_stats_request(ar);
  5201. ++ if (ret) {
  5202. ++ /* just print a warning and try to use older results */
  5203. ++ ath10k_warn(ar,
  5204. ++ "failed to get fw stats for ethtool: %d\n",
  5205. ++ ret);
  5206. ++ }
  5207. ++ }
  5208. ++
  5209. ++ pdev_stats = list_first_entry_or_null(&ar->debug.fw_stats.pdevs,
  5210. ++ struct ath10k_fw_stats_pdev,
  5211. ++ list);
  5212. ++ if (!pdev_stats) {
  5213. ++ /* no results available so just return zeroes */
  5214. ++ pdev_stats = &zero_stats;
  5215. ++ }
  5216. ++
  5217. ++ spin_lock_bh(&ar->data_lock);
  5218. ++
  5219. ++ data[i++] = pdev_stats->hw_reaped; /* ppdu reaped */
  5220. ++ data[i++] = 0; /* tx bytes */
  5221. ++ data[i++] = pdev_stats->htt_mpdus;
  5222. ++ data[i++] = 0; /* rx bytes */
  5223. ++ data[i++] = pdev_stats->ch_noise_floor;
  5224. ++ data[i++] = pdev_stats->cycle_count;
  5225. ++ data[i++] = pdev_stats->phy_err_count;
  5226. ++ data[i++] = pdev_stats->rts_bad;
  5227. ++ data[i++] = pdev_stats->rts_good;
  5228. ++ data[i++] = pdev_stats->chan_tx_power;
  5229. ++ data[i++] = pdev_stats->fcs_bad;
  5230. ++ data[i++] = pdev_stats->no_beacons;
  5231. ++ data[i++] = pdev_stats->mpdu_enqued;
  5232. ++ data[i++] = pdev_stats->msdu_enqued;
  5233. ++ data[i++] = pdev_stats->wmm_drop;
  5234. ++ data[i++] = pdev_stats->local_enqued;
  5235. ++ data[i++] = pdev_stats->local_freed;
  5236. ++ data[i++] = pdev_stats->hw_queued;
  5237. ++ data[i++] = pdev_stats->hw_reaped;
  5238. ++ data[i++] = pdev_stats->underrun;
  5239. ++ data[i++] = pdev_stats->tx_abort;
  5240. ++ data[i++] = pdev_stats->mpdus_requed;
  5241. ++ data[i++] = pdev_stats->tx_ko;
  5242. ++ data[i++] = pdev_stats->data_rc;
  5243. ++ data[i++] = pdev_stats->sw_retry_failure;
  5244. ++ data[i++] = pdev_stats->illgl_rate_phy_err;
  5245. ++ data[i++] = pdev_stats->pdev_cont_xretry;
  5246. ++ data[i++] = pdev_stats->pdev_tx_timeout;
  5247. ++ data[i++] = pdev_stats->txop_ovf;
  5248. ++ data[i++] = pdev_stats->pdev_resets;
  5249. ++ data[i++] = pdev_stats->mid_ppdu_route_change;
  5250. ++ data[i++] = pdev_stats->status_rcvd;
  5251. ++ data[i++] = pdev_stats->r0_frags;
  5252. ++ data[i++] = pdev_stats->r1_frags;
  5253. ++ data[i++] = pdev_stats->r2_frags;
  5254. ++ data[i++] = pdev_stats->r3_frags;
  5255. ++ data[i++] = pdev_stats->htt_msdus;
  5256. ++ data[i++] = pdev_stats->htt_mpdus;
  5257. ++ data[i++] = pdev_stats->loc_msdus;
  5258. ++ data[i++] = pdev_stats->loc_mpdus;
  5259. ++ data[i++] = pdev_stats->phy_errs;
  5260. ++ data[i++] = pdev_stats->phy_err_drop;
  5261. ++ data[i++] = pdev_stats->mpdu_errs;
  5262. ++ data[i++] = ar->stats.fw_crash_counter;
  5263. ++ data[i++] = ar->stats.fw_warm_reset_counter;
  5264. ++ data[i++] = ar->stats.fw_cold_reset_counter;
  5265. ++
  5266. ++ spin_unlock_bh(&ar->data_lock);
  5267. ++
  5268. ++ mutex_unlock(&ar->conf_mutex);
  5269. ++
  5270. ++ WARN_ON(i != ATH10K_SSTATS_LEN);
  5271. ++}
  5272. ++
  5273. + static const struct file_operations fops_fw_dbglog = {
  5274. + .read = ath10k_read_fw_dbglog,
  5275. + .write = ath10k_write_fw_dbglog,
  5276. +@@ -726,6 +1638,151 @@ static const struct file_operations fops
  5277. + .llseek = default_llseek,
  5278. + };
  5279. +
  5280. ++static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
  5281. ++{
  5282. ++ struct ath10k *ar = inode->i_private;
  5283. ++ void *buf;
  5284. ++ u32 hi_addr;
  5285. ++ __le32 addr;
  5286. ++ int ret;
  5287. ++
  5288. ++ mutex_lock(&ar->conf_mutex);
  5289. ++
  5290. ++ if (ar->state != ATH10K_STATE_ON &&
  5291. ++ ar->state != ATH10K_STATE_UTF) {
  5292. ++ ret = -ENETDOWN;
  5293. ++ goto err;
  5294. ++ }
  5295. ++
  5296. ++ buf = vmalloc(QCA988X_CAL_DATA_LEN);
  5297. ++ if (!buf) {
  5298. ++ ret = -ENOMEM;
  5299. ++ goto err;
  5300. ++ }
  5301. ++
  5302. ++ hi_addr = host_interest_item_address(HI_ITEM(hi_board_data));
  5303. ++
  5304. ++ ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr));
  5305. ++ if (ret) {
  5306. ++ ath10k_warn(ar, "failed to read hi_board_data address: %d\n", ret);
  5307. ++ goto err_vfree;
  5308. ++ }
  5309. ++
  5310. ++ ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), buf,
  5311. ++ QCA988X_CAL_DATA_LEN);
  5312. ++ if (ret) {
  5313. ++ ath10k_warn(ar, "failed to read calibration data: %d\n", ret);
  5314. ++ goto err_vfree;
  5315. ++ }
  5316. ++
  5317. ++ file->private_data = buf;
  5318. ++
  5319. ++ mutex_unlock(&ar->conf_mutex);
  5320. ++
  5321. ++ return 0;
  5322. ++
  5323. ++err_vfree:
  5324. ++ vfree(buf);
  5325. ++
  5326. ++err:
  5327. ++ mutex_unlock(&ar->conf_mutex);
  5328. ++
  5329. ++ return ret;
  5330. ++}
  5331. ++
  5332. ++static ssize_t ath10k_debug_cal_data_read(struct file *file,
  5333. ++ char __user *user_buf,
  5334. ++ size_t count, loff_t *ppos)
  5335. ++{
  5336. ++ void *buf = file->private_data;
  5337. ++
  5338. ++ return simple_read_from_buffer(user_buf, count, ppos,
  5339. ++ buf, QCA988X_CAL_DATA_LEN);
  5340. ++}
  5341. ++
  5342. ++static int ath10k_debug_cal_data_release(struct inode *inode,
  5343. ++ struct file *file)
  5344. ++{
  5345. ++ vfree(file->private_data);
  5346. ++
  5347. ++ return 0;
  5348. ++}
  5349. ++
  5350. ++static const struct file_operations fops_cal_data = {
  5351. ++ .open = ath10k_debug_cal_data_open,
  5352. ++ .read = ath10k_debug_cal_data_read,
  5353. ++ .release = ath10k_debug_cal_data_release,
  5354. ++ .owner = THIS_MODULE,
  5355. ++ .llseek = default_llseek,
  5356. ++};
  5357. ++
  5358. ++static ssize_t ath10k_read_nf_cal_period(struct file *file,
  5359. ++ char __user *user_buf,
  5360. ++ size_t count, loff_t *ppos)
  5361. ++{
  5362. ++ struct ath10k *ar = file->private_data;
  5363. ++ unsigned int len;
  5364. ++ char buf[32];
  5365. ++
  5366. ++ len = scnprintf(buf, sizeof(buf), "%d\n",
  5367. ++ ar->debug.nf_cal_period);
  5368. ++
  5369. ++ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
  5370. ++}
  5371. ++
  5372. ++static ssize_t ath10k_write_nf_cal_period(struct file *file,
  5373. ++ const char __user *user_buf,
  5374. ++ size_t count, loff_t *ppos)
  5375. ++{
  5376. ++ struct ath10k *ar = file->private_data;
  5377. ++ unsigned long period;
  5378. ++ int ret;
  5379. ++
  5380. ++ ret = kstrtoul_from_user(user_buf, count, 0, &period);
  5381. ++ if (ret)
  5382. ++ return ret;
  5383. ++
  5384. ++ if (period > WMI_PDEV_PARAM_CAL_PERIOD_MAX)
  5385. ++ return -EINVAL;
  5386. ++
  5387. ++ /* there's no way to switch back to the firmware default */
  5388. ++ if (period == 0)
  5389. ++ return -EINVAL;
  5390. ++
  5391. ++ mutex_lock(&ar->conf_mutex);
  5392. ++
  5393. ++ ar->debug.nf_cal_period = period;
  5394. ++
  5395. ++ if (ar->state != ATH10K_STATE_ON) {
  5396. ++ /* firmware is not running, nothing else to do */
  5397. ++ ret = count;
  5398. ++ goto exit;
  5399. ++ }
  5400. ++
  5401. ++ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->cal_period,
  5402. ++ ar->debug.nf_cal_period);
  5403. ++ if (ret) {
  5404. ++ ath10k_warn(ar, "cal period cfg failed from debugfs: %d\n",
  5405. ++ ret);
  5406. ++ goto exit;
  5407. ++ }
  5408. ++
  5409. ++ ret = count;
  5410. ++
  5411. ++exit:
  5412. ++ mutex_unlock(&ar->conf_mutex);
  5413. ++
  5414. ++ return ret;
  5415. ++}
  5416. ++
  5417. ++static const struct file_operations fops_nf_cal_period = {
  5418. ++ .read = ath10k_read_nf_cal_period,
  5419. ++ .write = ath10k_write_nf_cal_period,
  5420. ++ .open = simple_open,
  5421. ++ .owner = THIS_MODULE,
  5422. ++ .llseek = default_llseek,
  5423. ++};
  5424. ++
  5425. + int ath10k_debug_start(struct ath10k *ar)
  5426. + {
  5427. + int ret;
  5428. +@@ -735,17 +1792,44 @@ int ath10k_debug_start(struct ath10k *ar
  5429. + ret = ath10k_debug_htt_stats_req(ar);
  5430. + if (ret)
  5431. + /* continue normally anyway, this isn't serious */
  5432. +- ath10k_warn("failed to start htt stats workqueue: %d\n", ret);
  5433. ++ ath10k_warn(ar, "failed to start htt stats workqueue: %d\n",
  5434. ++ ret);
  5435. +
  5436. + if (ar->debug.fw_dbglog_mask) {
  5437. +- ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask);
  5438. ++ ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask,
  5439. ++ ATH10K_DBGLOG_LEVEL_WARN);
  5440. + if (ret)
  5441. + /* not serious */
  5442. +- ath10k_warn("failed to enable dbglog during start: %d",
  5443. ++ ath10k_warn(ar, "failed to enable dbglog during start: %d",
  5444. + ret);
  5445. + }
  5446. +
  5447. +- return 0;
  5448. ++ if (ar->debug.pktlog_filter) {
  5449. ++ ret = ath10k_wmi_pdev_pktlog_enable(ar,
  5450. ++ ar->debug.pktlog_filter);
  5451. ++ if (ret)
  5452. ++ /* not serious */
  5453. ++ ath10k_warn(ar,
  5454. ++ "failed to enable pktlog filter %x: %d\n",
  5455. ++ ar->debug.pktlog_filter, ret);
  5456. ++ } else {
  5457. ++ ret = ath10k_wmi_pdev_pktlog_disable(ar);
  5458. ++ if (ret)
  5459. ++ /* not serious */
  5460. ++ ath10k_warn(ar, "failed to disable pktlog: %d\n", ret);
  5461. ++ }
  5462. ++
  5463. ++ if (ar->debug.nf_cal_period) {
  5464. ++ ret = ath10k_wmi_pdev_set_param(ar,
  5465. ++ ar->wmi.pdev_param->cal_period,
  5466. ++ ar->debug.nf_cal_period);
  5467. ++ if (ret)
  5468. ++ /* not serious */
  5469. ++ ath10k_warn(ar, "cal period cfg failed from debug start: %d\n",
  5470. ++ ret);
  5471. ++ }
  5472. ++
  5473. ++ return ret;
  5474. + }
  5475. +
  5476. + void ath10k_debug_stop(struct ath10k *ar)
  5477. +@@ -757,6 +1841,11 @@ void ath10k_debug_stop(struct ath10k *ar
  5478. + * warning from del_timer(). */
  5479. + if (ar->debug.htt_stats_mask != 0)
  5480. + cancel_delayed_work(&ar->debug.htt_stats_dwork);
  5481. ++
  5482. ++ ar->debug.htt_max_amsdu = 0;
  5483. ++ ar->debug.htt_max_ampdu = 0;
  5484. ++
  5485. ++ ath10k_wmi_pdev_pktlog_disable(ar);
  5486. + }
  5487. +
  5488. + static ssize_t ath10k_write_simulate_radar(struct file *file,
  5489. +@@ -839,37 +1928,149 @@ static const struct file_operations fops
  5490. + .llseek = default_llseek,
  5491. + };
  5492. +
  5493. ++static ssize_t ath10k_write_pktlog_filter(struct file *file,
  5494. ++ const char __user *ubuf,
  5495. ++ size_t count, loff_t *ppos)
  5496. ++{
  5497. ++ struct ath10k *ar = file->private_data;
  5498. ++ u32 filter;
  5499. ++ int ret;
  5500. ++
  5501. ++ if (kstrtouint_from_user(ubuf, count, 0, &filter))
  5502. ++ return -EINVAL;
  5503. ++
  5504. ++ mutex_lock(&ar->conf_mutex);
  5505. ++
  5506. ++ if (ar->state != ATH10K_STATE_ON) {
  5507. ++ ar->debug.pktlog_filter = filter;
  5508. ++ ret = count;
  5509. ++ goto out;
  5510. ++ }
  5511. ++
  5512. ++ if (filter && (filter != ar->debug.pktlog_filter)) {
  5513. ++ ret = ath10k_wmi_pdev_pktlog_enable(ar, filter);
  5514. ++ if (ret) {
  5515. ++ ath10k_warn(ar, "failed to enable pktlog filter %x: %d\n",
  5516. ++ ar->debug.pktlog_filter, ret);
  5517. ++ goto out;
  5518. ++ }
  5519. ++ } else {
  5520. ++ ret = ath10k_wmi_pdev_pktlog_disable(ar);
  5521. ++ if (ret) {
  5522. ++ ath10k_warn(ar, "failed to disable pktlog: %d\n", ret);
  5523. ++ goto out;
  5524. ++ }
  5525. ++ }
  5526. ++
  5527. ++ ar->debug.pktlog_filter = filter;
  5528. ++ ret = count;
  5529. ++
  5530. ++out:
  5531. ++ mutex_unlock(&ar->conf_mutex);
  5532. ++ return ret;
  5533. ++}
  5534. ++
  5535. ++static ssize_t ath10k_read_pktlog_filter(struct file *file, char __user *ubuf,
  5536. ++ size_t count, loff_t *ppos)
  5537. ++{
  5538. ++ char buf[32];
  5539. ++ struct ath10k *ar = file->private_data;
  5540. ++ int len = 0;
  5541. ++
  5542. ++ mutex_lock(&ar->conf_mutex);
  5543. ++ len = scnprintf(buf, sizeof(buf) - len, "%08x\n",
  5544. ++ ar->debug.pktlog_filter);
  5545. ++ mutex_unlock(&ar->conf_mutex);
  5546. ++
  5547. ++ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
  5548. ++}
  5549. ++
  5550. ++static const struct file_operations fops_pktlog_filter = {
  5551. ++ .read = ath10k_read_pktlog_filter,
  5552. ++ .write = ath10k_write_pktlog_filter,
  5553. ++ .open = simple_open
  5554. ++};
  5555. ++
  5556. + int ath10k_debug_create(struct ath10k *ar)
  5557. + {
  5558. ++ ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data));
  5559. ++ if (!ar->debug.fw_crash_data)
  5560. ++ return -ENOMEM;
  5561. ++
  5562. ++ INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
  5563. ++ INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs);
  5564. ++ INIT_LIST_HEAD(&ar->debug.fw_stats.peers);
  5565. ++
  5566. ++ return 0;
  5567. ++}
  5568. ++
  5569. ++void ath10k_debug_destroy(struct ath10k *ar)
  5570. ++{
  5571. ++ vfree(ar->debug.fw_crash_data);
  5572. ++ ar->debug.fw_crash_data = NULL;
  5573. ++
  5574. ++ ath10k_debug_fw_stats_reset(ar);
  5575. ++}
  5576. ++
  5577. ++int ath10k_debug_register(struct ath10k *ar)
  5578. ++{
  5579. + ar->debug.debugfs_phy = debugfs_create_dir("ath10k",
  5580. + ar->hw->wiphy->debugfsdir);
  5581. ++ if (IS_ERR_OR_NULL(ar->debug.debugfs_phy)) {
  5582. ++ if (IS_ERR(ar->debug.debugfs_phy))
  5583. ++ return PTR_ERR(ar->debug.debugfs_phy);
  5584. +
  5585. +- if (!ar->debug.debugfs_phy)
  5586. + return -ENOMEM;
  5587. ++ }
  5588. +
  5589. + INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork,
  5590. + ath10k_debug_htt_stats_dwork);
  5591. +
  5592. +- init_completion(&ar->debug.event_stats_compl);
  5593. ++ init_completion(&ar->debug.fw_stats_complete);
  5594. +
  5595. + debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar,
  5596. + &fops_fw_stats);
  5597. +
  5598. ++ debugfs_create_file("fw_reset_stats", S_IRUSR, ar->debug.debugfs_phy,
  5599. ++ ar, &fops_fw_reset_stats);
  5600. ++
  5601. + debugfs_create_file("wmi_services", S_IRUSR, ar->debug.debugfs_phy, ar,
  5602. + &fops_wmi_services);
  5603. +
  5604. + debugfs_create_file("simulate_fw_crash", S_IRUSR, ar->debug.debugfs_phy,
  5605. + ar, &fops_simulate_fw_crash);
  5606. +
  5607. ++ debugfs_create_file("fw_crash_dump", S_IRUSR, ar->debug.debugfs_phy,
  5608. ++ ar, &fops_fw_crash_dump);
  5609. ++
  5610. ++ debugfs_create_file("reg_addr", S_IRUSR | S_IWUSR,
  5611. ++ ar->debug.debugfs_phy, ar, &fops_reg_addr);
  5612. ++
  5613. ++ debugfs_create_file("reg_value", S_IRUSR | S_IWUSR,
  5614. ++ ar->debug.debugfs_phy, ar, &fops_reg_value);
  5615. ++
  5616. ++ debugfs_create_file("mem_value", S_IRUSR | S_IWUSR,
  5617. ++ ar->debug.debugfs_phy, ar, &fops_mem_value);
  5618. ++
  5619. + debugfs_create_file("chip_id", S_IRUSR, ar->debug.debugfs_phy,
  5620. + ar, &fops_chip_id);
  5621. +
  5622. + debugfs_create_file("htt_stats_mask", S_IRUSR, ar->debug.debugfs_phy,
  5623. + ar, &fops_htt_stats_mask);
  5624. +
  5625. ++ debugfs_create_file("htt_max_amsdu_ampdu", S_IRUSR | S_IWUSR,
  5626. ++ ar->debug.debugfs_phy, ar,
  5627. ++ &fops_htt_max_amsdu_ampdu);
  5628. ++
  5629. + debugfs_create_file("fw_dbglog", S_IRUSR, ar->debug.debugfs_phy,
  5630. + ar, &fops_fw_dbglog);
  5631. +
  5632. ++ debugfs_create_file("cal_data", S_IRUSR, ar->debug.debugfs_phy,
  5633. ++ ar, &fops_cal_data);
  5634. ++
  5635. ++ debugfs_create_file("nf_cal_period", S_IRUSR | S_IWUSR,
  5636. ++ ar->debug.debugfs_phy, ar, &fops_nf_cal_period);
  5637. ++
  5638. + if (config_enabled(CPTCFG_ATH10K_DFS_CERTIFIED)) {
  5639. + debugfs_create_file("dfs_simulate_radar", S_IWUSR,
  5640. + ar->debug.debugfs_phy, ar,
  5641. +@@ -884,10 +2085,13 @@ int ath10k_debug_create(struct ath10k *a
  5642. + &fops_dfs_stats);
  5643. + }
  5644. +
  5645. ++ debugfs_create_file("pktlog_filter", S_IRUGO | S_IWUSR,
  5646. ++ ar->debug.debugfs_phy, ar, &fops_pktlog_filter);
  5647. ++
  5648. + return 0;
  5649. + }
  5650. +
  5651. +-void ath10k_debug_destroy(struct ath10k *ar)
  5652. ++void ath10k_debug_unregister(struct ath10k *ar)
  5653. + {
  5654. + cancel_delayed_work_sync(&ar->debug.htt_stats_dwork);
  5655. + }
  5656. +@@ -895,7 +2099,8 @@ void ath10k_debug_destroy(struct ath10k
  5657. + #endif /* CPTCFG_ATH10K_DEBUGFS */
  5658. +
  5659. + #ifdef CPTCFG_ATH10K_DEBUG
  5660. +-void ath10k_dbg(enum ath10k_debug_mask mask, const char *fmt, ...)
  5661. ++void ath10k_dbg(struct ath10k *ar, enum ath10k_debug_mask mask,
  5662. ++ const char *fmt, ...)
  5663. + {
  5664. + struct va_format vaf;
  5665. + va_list args;
  5666. +@@ -906,27 +2111,43 @@ void ath10k_dbg(enum ath10k_debug_mask m
  5667. + vaf.va = &args;
  5668. +
  5669. + if (ath10k_debug_mask & mask)
  5670. +- ath10k_printk(KERN_DEBUG, "%pV", &vaf);
  5671. ++ dev_printk(KERN_DEBUG, ar->dev, "%pV", &vaf);
  5672. +
  5673. +- trace_ath10k_log_dbg(mask, &vaf);
  5674. ++ trace_ath10k_log_dbg(ar, mask, &vaf);
  5675. +
  5676. + va_end(args);
  5677. + }
  5678. + EXPORT_SYMBOL(ath10k_dbg);
  5679. +
  5680. +-void ath10k_dbg_dump(enum ath10k_debug_mask mask,
  5681. ++void ath10k_dbg_dump(struct ath10k *ar,
  5682. ++ enum ath10k_debug_mask mask,
  5683. + const char *msg, const char *prefix,
  5684. + const void *buf, size_t len)
  5685. + {
  5686. ++ char linebuf[256];
  5687. ++ unsigned int linebuflen;
  5688. ++ const void *ptr;
  5689. ++
  5690. + if (ath10k_debug_mask & mask) {
  5691. + if (msg)
  5692. +- ath10k_dbg(mask, "%s\n", msg);
  5693. ++ ath10k_dbg(ar, mask, "%s\n", msg);
  5694. +
  5695. +- print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len);
  5696. ++ for (ptr = buf; (ptr - buf) < len; ptr += 16) {
  5697. ++ linebuflen = 0;
  5698. ++ linebuflen += scnprintf(linebuf + linebuflen,
  5699. ++ sizeof(linebuf) - linebuflen,
  5700. ++ "%s%08x: ",
  5701. ++ (prefix ? prefix : ""),
  5702. ++ (unsigned int)(ptr - buf));
  5703. ++ hex_dump_to_buffer(ptr, len - (ptr - buf), 16, 1,
  5704. ++ linebuf + linebuflen,
  5705. ++ sizeof(linebuf) - linebuflen, true);
  5706. ++ dev_printk(KERN_DEBUG, ar->dev, "%s\n", linebuf);
  5707. ++ }
  5708. + }
  5709. +
  5710. + /* tracing code doesn't like null strings :/ */
  5711. +- trace_ath10k_log_dbg_dump(msg ? msg : "", prefix ? prefix : "",
  5712. ++ trace_ath10k_log_dbg_dump(ar, msg ? msg : "", prefix ? prefix : "",
  5713. + buf, len);
  5714. + }
  5715. + EXPORT_SYMBOL(ath10k_dbg_dump);
  5716. +--- a/drivers/net/wireless/ath/ath10k/debug.h
  5717. ++++ b/drivers/net/wireless/ath/ath10k/debug.h
  5718. +@@ -34,28 +34,55 @@ enum ath10k_debug_mask {
  5719. + ATH10K_DBG_DATA = 0x00000200,
  5720. + ATH10K_DBG_BMI = 0x00000400,
  5721. + ATH10K_DBG_REGULATORY = 0x00000800,
  5722. ++ ATH10K_DBG_TESTMODE = 0x00001000,
  5723. ++ ATH10K_DBG_WMI_PRINT = 0x00002000,
  5724. + ATH10K_DBG_ANY = 0xffffffff,
  5725. + };
  5726. +
  5727. ++enum ath10k_pktlog_filter {
  5728. ++ ATH10K_PKTLOG_RX = 0x000000001,
  5729. ++ ATH10K_PKTLOG_TX = 0x000000002,
  5730. ++ ATH10K_PKTLOG_RCFIND = 0x000000004,
  5731. ++ ATH10K_PKTLOG_RCUPDATE = 0x000000008,
  5732. ++ ATH10K_PKTLOG_DBG_PRINT = 0x000000010,
  5733. ++ ATH10K_PKTLOG_ANY = 0x00000001f,
  5734. ++};
  5735. ++
  5736. ++enum ath10k_dbg_aggr_mode {
  5737. ++ ATH10K_DBG_AGGR_MODE_AUTO,
  5738. ++ ATH10K_DBG_AGGR_MODE_MANUAL,
  5739. ++ ATH10K_DBG_AGGR_MODE_MAX,
  5740. ++};
  5741. ++
  5742. + extern unsigned int ath10k_debug_mask;
  5743. +
  5744. +-__printf(1, 2) int ath10k_info(const char *fmt, ...);
  5745. +-__printf(1, 2) int ath10k_err(const char *fmt, ...);
  5746. +-__printf(1, 2) int ath10k_warn(const char *fmt, ...);
  5747. ++__printf(2, 3) void ath10k_info(struct ath10k *ar, const char *fmt, ...);
  5748. ++__printf(2, 3) void ath10k_err(struct ath10k *ar, const char *fmt, ...);
  5749. ++__printf(2, 3) void ath10k_warn(struct ath10k *ar, const char *fmt, ...);
  5750. ++void ath10k_print_driver_info(struct ath10k *ar);
  5751. +
  5752. + #ifdef CPTCFG_ATH10K_DEBUGFS
  5753. + int ath10k_debug_start(struct ath10k *ar);
  5754. + void ath10k_debug_stop(struct ath10k *ar);
  5755. + int ath10k_debug_create(struct ath10k *ar);
  5756. + void ath10k_debug_destroy(struct ath10k *ar);
  5757. +-void ath10k_debug_read_service_map(struct ath10k *ar,
  5758. +- void *service_map,
  5759. +- size_t map_size);
  5760. +-void ath10k_debug_read_target_stats(struct ath10k *ar,
  5761. +- struct wmi_stats_event *ev);
  5762. ++int ath10k_debug_register(struct ath10k *ar);
  5763. ++void ath10k_debug_unregister(struct ath10k *ar);
  5764. ++void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb);
  5765. ++struct ath10k_fw_crash_data *
  5766. ++ath10k_debug_get_new_fw_crash_data(struct ath10k *ar);
  5767. +
  5768. ++void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len);
  5769. + #define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++)
  5770. +
  5771. ++void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
  5772. ++ struct ieee80211_vif *vif,
  5773. ++ u32 sset, u8 *data);
  5774. ++int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw,
  5775. ++ struct ieee80211_vif *vif, int sset);
  5776. ++void ath10k_debug_get_et_stats(struct ieee80211_hw *hw,
  5777. ++ struct ieee80211_vif *vif,
  5778. ++ struct ethtool_stats *stats, u64 *data);
  5779. + #else
  5780. + static inline int ath10k_debug_start(struct ath10k *ar)
  5781. + {
  5782. +@@ -75,36 +102,62 @@ static inline void ath10k_debug_destroy(
  5783. + {
  5784. + }
  5785. +
  5786. +-static inline void ath10k_debug_read_service_map(struct ath10k *ar,
  5787. +- void *service_map,
  5788. +- size_t map_size)
  5789. ++static inline int ath10k_debug_register(struct ath10k *ar)
  5790. ++{
  5791. ++ return 0;
  5792. ++}
  5793. ++
  5794. ++static inline void ath10k_debug_unregister(struct ath10k *ar)
  5795. + {
  5796. + }
  5797. +
  5798. +-static inline void ath10k_debug_read_target_stats(struct ath10k *ar,
  5799. +- struct wmi_stats_event *ev)
  5800. ++static inline void ath10k_debug_fw_stats_process(struct ath10k *ar,
  5801. ++ struct sk_buff *skb)
  5802. + {
  5803. + }
  5804. +
  5805. ++static inline void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer,
  5806. ++ int len)
  5807. ++{
  5808. ++}
  5809. ++
  5810. ++static inline struct ath10k_fw_crash_data *
  5811. ++ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
  5812. ++{
  5813. ++ return NULL;
  5814. ++}
  5815. ++
  5816. + #define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
  5817. +
  5818. ++#define ath10k_debug_get_et_strings NULL
  5819. ++#define ath10k_debug_get_et_sset_count NULL
  5820. ++#define ath10k_debug_get_et_stats NULL
  5821. ++
  5822. + #endif /* CPTCFG_ATH10K_DEBUGFS */
  5823. ++#ifdef CPTCFG_MAC80211_DEBUGFS
  5824. ++void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
  5825. ++ struct ieee80211_sta *sta, struct dentry *dir);
  5826. ++#endif /* CPTCFG_MAC80211_DEBUGFS */
  5827. +
  5828. + #ifdef CPTCFG_ATH10K_DEBUG
  5829. +-__printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask,
  5830. ++__printf(3, 4) void ath10k_dbg(struct ath10k *ar,
  5831. ++ enum ath10k_debug_mask mask,
  5832. + const char *fmt, ...);
  5833. +-void ath10k_dbg_dump(enum ath10k_debug_mask mask,
  5834. ++void ath10k_dbg_dump(struct ath10k *ar,
  5835. ++ enum ath10k_debug_mask mask,
  5836. + const char *msg, const char *prefix,
  5837. + const void *buf, size_t len);
  5838. + #else /* CPTCFG_ATH10K_DEBUG */
  5839. +
  5840. +-static inline int ath10k_dbg(enum ath10k_debug_mask dbg_mask,
  5841. ++static inline int ath10k_dbg(struct ath10k *ar,
  5842. ++ enum ath10k_debug_mask dbg_mask,
  5843. + const char *fmt, ...)
  5844. + {
  5845. + return 0;
  5846. + }
  5847. +
  5848. +-static inline void ath10k_dbg_dump(enum ath10k_debug_mask mask,
  5849. ++static inline void ath10k_dbg_dump(struct ath10k *ar,
  5850. ++ enum ath10k_debug_mask mask,
  5851. + const char *msg, const char *prefix,
  5852. + const void *buf, size_t len)
  5853. + {
  5854. +--- a/drivers/net/wireless/ath/ath10k/hif.h
  5855. ++++ b/drivers/net/wireless/ath/ath10k/hif.h
  5856. +@@ -20,6 +20,7 @@
  5857. +
  5858. + #include <linux/kernel.h>
  5859. + #include "core.h"
  5860. ++#include "debug.h"
  5861. +
  5862. + struct ath10k_hif_sg_item {
  5863. + u16 transfer_id;
  5864. +@@ -31,11 +32,9 @@ struct ath10k_hif_sg_item {
  5865. +
  5866. + struct ath10k_hif_cb {
  5867. + int (*tx_completion)(struct ath10k *ar,
  5868. +- struct sk_buff *wbuf,
  5869. +- unsigned transfer_id);
  5870. ++ struct sk_buff *wbuf);
  5871. + int (*rx_completion)(struct ath10k *ar,
  5872. +- struct sk_buff *wbuf,
  5873. +- u8 pipe_id);
  5874. ++ struct sk_buff *wbuf);
  5875. + };
  5876. +
  5877. + struct ath10k_hif_ops {
  5878. +@@ -43,6 +42,12 @@ struct ath10k_hif_ops {
  5879. + int (*tx_sg)(struct ath10k *ar, u8 pipe_id,
  5880. + struct ath10k_hif_sg_item *items, int n_items);
  5881. +
  5882. ++ /* read firmware memory through the diagnose interface */
  5883. ++ int (*diag_read)(struct ath10k *ar, u32 address, void *buf,
  5884. ++ size_t buf_len);
  5885. ++
  5886. ++ int (*diag_write)(struct ath10k *ar, u32 address, const void *data,
  5887. ++ int nbytes);
  5888. + /*
  5889. + * API to handle HIF-specific BMI message exchanges, this API is
  5890. + * synchronous and only allowed to be called from a context that
  5891. +@@ -80,6 +85,10 @@ struct ath10k_hif_ops {
  5892. +
  5893. + u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id);
  5894. +
  5895. ++ u32 (*read32)(struct ath10k *ar, u32 address);
  5896. ++
  5897. ++ void (*write32)(struct ath10k *ar, u32 address, u32 value);
  5898. ++
  5899. + /* Power up the device and enter BMI transfer mode for FW download */
  5900. + int (*power_up)(struct ath10k *ar);
  5901. +
  5902. +@@ -91,7 +100,6 @@ struct ath10k_hif_ops {
  5903. + int (*resume)(struct ath10k *ar);
  5904. + };
  5905. +
  5906. +-
  5907. + static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
  5908. + struct ath10k_hif_sg_item *items,
  5909. + int n_items)
  5910. +@@ -99,6 +107,21 @@ static inline int ath10k_hif_tx_sg(struc
  5911. + return ar->hif.ops->tx_sg(ar, pipe_id, items, n_items);
  5912. + }
  5913. +
  5914. ++static inline int ath10k_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
  5915. ++ size_t buf_len)
  5916. ++{
  5917. ++ return ar->hif.ops->diag_read(ar, address, buf, buf_len);
  5918. ++}
  5919. ++
  5920. ++static inline int ath10k_hif_diag_write(struct ath10k *ar, u32 address,
  5921. ++ const void *data, int nbytes)
  5922. ++{
  5923. ++ if (!ar->hif.ops->diag_write)
  5924. ++ return -EOPNOTSUPP;
  5925. ++
  5926. ++ return ar->hif.ops->diag_write(ar, address, data, nbytes);
  5927. ++}
  5928. ++
  5929. + static inline int ath10k_hif_exchange_bmi_msg(struct ath10k *ar,
  5930. + void *request, u32 request_len,
  5931. + void *response, u32 *response_len)
  5932. +@@ -178,4 +201,25 @@ static inline int ath10k_hif_resume(stru
  5933. + return ar->hif.ops->resume(ar);
  5934. + }
  5935. +
  5936. ++static inline u32 ath10k_hif_read32(struct ath10k *ar, u32 address)
  5937. ++{
  5938. ++ if (!ar->hif.ops->read32) {
  5939. ++ ath10k_warn(ar, "hif read32 not supported\n");
  5940. ++ return 0xdeaddead;
  5941. ++ }
  5942. ++
  5943. ++ return ar->hif.ops->read32(ar, address);
  5944. ++}
  5945. ++
  5946. ++static inline void ath10k_hif_write32(struct ath10k *ar,
  5947. ++ u32 address, u32 data)
  5948. ++{
  5949. ++ if (!ar->hif.ops->write32) {
  5950. ++ ath10k_warn(ar, "hif write32 not supported\n");
  5951. ++ return;
  5952. ++ }
  5953. ++
  5954. ++ ar->hif.ops->write32(ar, address, data);
  5955. ++}
  5956. ++
  5957. + #endif /* _HIF_H_ */
  5958. +--- a/drivers/net/wireless/ath/ath10k/htc.c
  5959. ++++ b/drivers/net/wireless/ath/ath10k/htc.c
  5960. +@@ -45,10 +45,8 @@ static struct sk_buff *ath10k_htc_build_
  5961. + struct ath10k_skb_cb *skb_cb;
  5962. +
  5963. + skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE);
  5964. +- if (!skb) {
  5965. +- ath10k_warn("Unable to allocate ctrl skb\n");
  5966. ++ if (!skb)
  5967. + return NULL;
  5968. +- }
  5969. +
  5970. + skb_reserve(skb, 20); /* FIXME: why 20 bytes? */
  5971. + WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
  5972. +@@ -56,7 +54,7 @@ static struct sk_buff *ath10k_htc_build_
  5973. + skb_cb = ATH10K_SKB_CB(skb);
  5974. + memset(skb_cb, 0, sizeof(*skb_cb));
  5975. +
  5976. +- ath10k_dbg(ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb);
  5977. ++ ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb);
  5978. + return skb;
  5979. + }
  5980. +
  5981. +@@ -72,13 +70,15 @@ static inline void ath10k_htc_restore_tx
  5982. + static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
  5983. + struct sk_buff *skb)
  5984. + {
  5985. +- ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
  5986. ++ struct ath10k *ar = ep->htc->ar;
  5987. ++
  5988. ++ ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
  5989. + ep->eid, skb);
  5990. +
  5991. + ath10k_htc_restore_tx_skb(ep->htc, skb);
  5992. +
  5993. + if (!ep->ep_ops.ep_tx_complete) {
  5994. +- ath10k_warn("no tx handler for eid %d\n", ep->eid);
  5995. ++ ath10k_warn(ar, "no tx handler for eid %d\n", ep->eid);
  5996. + dev_kfree_skb_any(skb);
  5997. + return;
  5998. + }
  5999. +@@ -89,12 +89,14 @@ static void ath10k_htc_notify_tx_complet
  6000. + /* assumes tx_lock is held */
  6001. + static bool ath10k_htc_ep_need_credit_update(struct ath10k_htc_ep *ep)
  6002. + {
  6003. ++ struct ath10k *ar = ep->htc->ar;
  6004. ++
  6005. + if (!ep->tx_credit_flow_enabled)
  6006. + return false;
  6007. + if (ep->tx_credits >= ep->tx_credits_per_max_message)
  6008. + return false;
  6009. +
  6010. +- ath10k_dbg(ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n",
  6011. ++ ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n",
  6012. + ep->eid);
  6013. + return true;
  6014. + }
  6015. +@@ -123,6 +125,7 @@ int ath10k_htc_send(struct ath10k_htc *h
  6016. + enum ath10k_htc_ep_id eid,
  6017. + struct sk_buff *skb)
  6018. + {
  6019. ++ struct ath10k *ar = htc->ar;
  6020. + struct ath10k_htc_ep *ep = &htc->endpoint[eid];
  6021. + struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
  6022. + struct ath10k_hif_sg_item sg_item;
  6023. +@@ -134,18 +137,10 @@ int ath10k_htc_send(struct ath10k_htc *h
  6024. + return -ECOMM;
  6025. +
  6026. + if (eid >= ATH10K_HTC_EP_COUNT) {
  6027. +- ath10k_warn("Invalid endpoint id: %d\n", eid);
  6028. ++ ath10k_warn(ar, "Invalid endpoint id: %d\n", eid);
  6029. + return -ENOENT;
  6030. + }
  6031. +
  6032. +- /* FIXME: This looks ugly, can we fix it? */
  6033. +- spin_lock_bh(&htc->tx_lock);
  6034. +- if (htc->stopped) {
  6035. +- spin_unlock_bh(&htc->tx_lock);
  6036. +- return -ESHUTDOWN;
  6037. +- }
  6038. +- spin_unlock_bh(&htc->tx_lock);
  6039. +-
  6040. + skb_push(skb, sizeof(struct ath10k_htc_hdr));
  6041. +
  6042. + if (ep->tx_credit_flow_enabled) {
  6043. +@@ -157,7 +152,7 @@ int ath10k_htc_send(struct ath10k_htc *h
  6044. + goto err_pull;
  6045. + }
  6046. + ep->tx_credits -= credits;
  6047. +- ath10k_dbg(ATH10K_DBG_HTC,
  6048. ++ ath10k_dbg(ar, ATH10K_DBG_HTC,
  6049. + "htc ep %d consumed %d credits (total %d)\n",
  6050. + eid, credits, ep->tx_credits);
  6051. + spin_unlock_bh(&htc->tx_lock);
  6052. +@@ -165,6 +160,7 @@ int ath10k_htc_send(struct ath10k_htc *h
  6053. +
  6054. + ath10k_htc_prepare_tx_skb(ep, skb);
  6055. +
  6056. ++ skb_cb->eid = eid;
  6057. + skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
  6058. + ret = dma_mapping_error(dev, skb_cb->paddr);
  6059. + if (ret)
  6060. +@@ -188,7 +184,7 @@ err_credits:
  6061. + if (ep->tx_credit_flow_enabled) {
  6062. + spin_lock_bh(&htc->tx_lock);
  6063. + ep->tx_credits += credits;
  6064. +- ath10k_dbg(ATH10K_DBG_HTC,
  6065. ++ ath10k_dbg(ar, ATH10K_DBG_HTC,
  6066. + "htc ep %d reverted %d credits back (total %d)\n",
  6067. + eid, credits, ep->tx_credits);
  6068. + spin_unlock_bh(&htc->tx_lock);
  6069. +@@ -202,15 +198,18 @@ err_pull:
  6070. + }
  6071. +
  6072. + static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
  6073. +- struct sk_buff *skb,
  6074. +- unsigned int eid)
  6075. ++ struct sk_buff *skb)
  6076. + {
  6077. + struct ath10k_htc *htc = &ar->htc;
  6078. +- struct ath10k_htc_ep *ep = &htc->endpoint[eid];
  6079. ++ struct ath10k_skb_cb *skb_cb;
  6080. ++ struct ath10k_htc_ep *ep;
  6081. +
  6082. + if (WARN_ON_ONCE(!skb))
  6083. + return 0;
  6084. +
  6085. ++ skb_cb = ATH10K_SKB_CB(skb);
  6086. ++ ep = &htc->endpoint[skb_cb->eid];
  6087. ++
  6088. + ath10k_htc_notify_tx_completion(ep, skb);
  6089. + /* the skb now belongs to the completion handler */
  6090. +
  6091. +@@ -227,11 +226,12 @@ ath10k_htc_process_credit_report(struct
  6092. + int len,
  6093. + enum ath10k_htc_ep_id eid)
  6094. + {
  6095. ++ struct ath10k *ar = htc->ar;
  6096. + struct ath10k_htc_ep *ep;
  6097. + int i, n_reports;
  6098. +
  6099. + if (len % sizeof(*report))
  6100. +- ath10k_warn("Uneven credit report len %d", len);
  6101. ++ ath10k_warn(ar, "Uneven credit report len %d", len);
  6102. +
  6103. + n_reports = len / sizeof(*report);
  6104. +
  6105. +@@ -243,7 +243,7 @@ ath10k_htc_process_credit_report(struct
  6106. + ep = &htc->endpoint[report->eid];
  6107. + ep->tx_credits += report->credits;
  6108. +
  6109. +- ath10k_dbg(ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
  6110. ++ ath10k_dbg(ar, ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
  6111. + report->eid, report->credits, ep->tx_credits);
  6112. +
  6113. + if (ep->ep_ops.ep_tx_credits) {
  6114. +@@ -260,6 +260,7 @@ static int ath10k_htc_process_trailer(st
  6115. + int length,
  6116. + enum ath10k_htc_ep_id src_eid)
  6117. + {
  6118. ++ struct ath10k *ar = htc->ar;
  6119. + int status = 0;
  6120. + struct ath10k_htc_record *record;
  6121. + u8 *orig_buffer;
  6122. +@@ -279,7 +280,7 @@ static int ath10k_htc_process_trailer(st
  6123. +
  6124. + if (record->hdr.len > length) {
  6125. + /* no room left in buffer for record */
  6126. +- ath10k_warn("Invalid record length: %d\n",
  6127. ++ ath10k_warn(ar, "Invalid record length: %d\n",
  6128. + record->hdr.len);
  6129. + status = -EINVAL;
  6130. + break;
  6131. +@@ -289,7 +290,7 @@ static int ath10k_htc_process_trailer(st
  6132. + case ATH10K_HTC_RECORD_CREDITS:
  6133. + len = sizeof(struct ath10k_htc_credit_report);
  6134. + if (record->hdr.len < len) {
  6135. +- ath10k_warn("Credit report too long\n");
  6136. ++ ath10k_warn(ar, "Credit report too long\n");
  6137. + status = -EINVAL;
  6138. + break;
  6139. + }
  6140. +@@ -299,7 +300,7 @@ static int ath10k_htc_process_trailer(st
  6141. + src_eid);
  6142. + break;
  6143. + default:
  6144. +- ath10k_warn("Unhandled record: id:%d length:%d\n",
  6145. ++ ath10k_warn(ar, "Unhandled record: id:%d length:%d\n",
  6146. + record->hdr.id, record->hdr.len);
  6147. + break;
  6148. + }
  6149. +@@ -313,15 +314,14 @@ static int ath10k_htc_process_trailer(st
  6150. + }
  6151. +
  6152. + if (status)
  6153. +- ath10k_dbg_dump(ATH10K_DBG_HTC, "htc rx bad trailer", "",
  6154. ++ ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc rx bad trailer", "",
  6155. + orig_buffer, orig_length);
  6156. +
  6157. + return status;
  6158. + }
  6159. +
  6160. + static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
  6161. +- struct sk_buff *skb,
  6162. +- u8 pipe_id)
  6163. ++ struct sk_buff *skb)
  6164. + {
  6165. + int status = 0;
  6166. + struct ath10k_htc *htc = &ar->htc;
  6167. +@@ -339,8 +339,8 @@ static int ath10k_htc_rx_completion_hand
  6168. + eid = hdr->eid;
  6169. +
  6170. + if (eid >= ATH10K_HTC_EP_COUNT) {
  6171. +- ath10k_warn("HTC Rx: invalid eid %d\n", eid);
  6172. +- ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad header", "",
  6173. ++ ath10k_warn(ar, "HTC Rx: invalid eid %d\n", eid);
  6174. ++ ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad header", "",
  6175. + hdr, sizeof(*hdr));
  6176. + status = -EINVAL;
  6177. + goto out;
  6178. +@@ -360,19 +360,19 @@ static int ath10k_htc_rx_completion_hand
  6179. + payload_len = __le16_to_cpu(hdr->len);
  6180. +
  6181. + if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
  6182. +- ath10k_warn("HTC rx frame too long, len: %zu\n",
  6183. ++ ath10k_warn(ar, "HTC rx frame too long, len: %zu\n",
  6184. + payload_len + sizeof(*hdr));
  6185. +- ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad rx pkt len", "",
  6186. ++ ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "",
  6187. + hdr, sizeof(*hdr));
  6188. + status = -EINVAL;
  6189. + goto out;
  6190. + }
  6191. +
  6192. + if (skb->len < payload_len) {
  6193. +- ath10k_dbg(ATH10K_DBG_HTC,
  6194. ++ ath10k_dbg(ar, ATH10K_DBG_HTC,
  6195. + "HTC Rx: insufficient length, got %d, expected %d\n",
  6196. + skb->len, payload_len);
  6197. +- ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad rx pkt len",
  6198. ++ ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len",
  6199. + "", hdr, sizeof(*hdr));
  6200. + status = -EINVAL;
  6201. + goto out;
  6202. +@@ -388,7 +388,7 @@ static int ath10k_htc_rx_completion_hand
  6203. +
  6204. + if ((trailer_len < min_len) ||
  6205. + (trailer_len > payload_len)) {
  6206. +- ath10k_warn("Invalid trailer length: %d\n",
  6207. ++ ath10k_warn(ar, "Invalid trailer length: %d\n",
  6208. + trailer_len);
  6209. + status = -EPROTO;
  6210. + goto out;
  6211. +@@ -421,7 +421,7 @@ static int ath10k_htc_rx_completion_hand
  6212. + * this is a fatal error, target should not be
  6213. + * sending unsolicited messages on the ep 0
  6214. + */
  6215. +- ath10k_warn("HTC rx ctrl still processing\n");
  6216. ++ ath10k_warn(ar, "HTC rx ctrl still processing\n");
  6217. + status = -EINVAL;
  6218. + complete(&htc->ctl_resp);
  6219. + goto out;
  6220. +@@ -442,7 +442,7 @@ static int ath10k_htc_rx_completion_hand
  6221. + goto out;
  6222. + }
  6223. +
  6224. +- ath10k_dbg(ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n",
  6225. ++ ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n",
  6226. + eid, skb);
  6227. + ep->ep_ops.ep_rx_complete(ar, skb);
  6228. +
  6229. +@@ -459,7 +459,7 @@ static void ath10k_htc_control_rx_comple
  6230. + {
  6231. + /* This is unexpected. FW is not supposed to send regular rx on this
  6232. + * endpoint. */
  6233. +- ath10k_warn("unexpected htc rx\n");
  6234. ++ ath10k_warn(ar, "unexpected htc rx\n");
  6235. + kfree_skb(skb);
  6236. + }
  6237. +
  6238. +@@ -546,7 +546,8 @@ static u8 ath10k_htc_get_credit_allocati
  6239. +
  6240. + int ath10k_htc_wait_target(struct ath10k_htc *htc)
  6241. + {
  6242. +- int status = 0;
  6243. ++ struct ath10k *ar = htc->ar;
  6244. ++ int i, status = 0;
  6245. + struct ath10k_htc_svc_conn_req conn_req;
  6246. + struct ath10k_htc_svc_conn_resp conn_resp;
  6247. + struct ath10k_htc_msg *msg;
  6248. +@@ -556,16 +557,32 @@ int ath10k_htc_wait_target(struct ath10k
  6249. +
  6250. + status = wait_for_completion_timeout(&htc->ctl_resp,
  6251. + ATH10K_HTC_WAIT_TIMEOUT_HZ);
  6252. +- if (status <= 0) {
  6253. ++ if (status == 0) {
  6254. ++ /* Workaround: In some cases the PCI HIF doesn't
  6255. ++ * receive interrupt for the control response message
  6256. ++ * even if the buffer was completed. It is suspected
  6257. ++ * iomap writes unmasking PCI CE irqs aren't propagated
  6258. ++ * properly in KVM PCI-passthrough sometimes.
  6259. ++ */
  6260. ++ ath10k_warn(ar, "failed to receive control response completion, polling..\n");
  6261. ++
  6262. ++ for (i = 0; i < CE_COUNT; i++)
  6263. ++ ath10k_hif_send_complete_check(htc->ar, i, 1);
  6264. ++
  6265. ++ status = wait_for_completion_timeout(&htc->ctl_resp,
  6266. ++ ATH10K_HTC_WAIT_TIMEOUT_HZ);
  6267. ++
  6268. + if (status == 0)
  6269. + status = -ETIMEDOUT;
  6270. ++ }
  6271. +
  6272. +- ath10k_err("ctl_resp never came in (%d)\n", status);
  6273. ++ if (status < 0) {
  6274. ++ ath10k_err(ar, "ctl_resp never came in (%d)\n", status);
  6275. + return status;
  6276. + }
  6277. +
  6278. + if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {
  6279. +- ath10k_err("Invalid HTC ready msg len:%d\n",
  6280. ++ ath10k_err(ar, "Invalid HTC ready msg len:%d\n",
  6281. + htc->control_resp_len);
  6282. + return -ECOMM;
  6283. + }
  6284. +@@ -576,21 +593,21 @@ int ath10k_htc_wait_target(struct ath10k
  6285. + credit_size = __le16_to_cpu(msg->ready.credit_size);
  6286. +
  6287. + if (message_id != ATH10K_HTC_MSG_READY_ID) {
  6288. +- ath10k_err("Invalid HTC ready msg: 0x%x\n", message_id);
  6289. ++ ath10k_err(ar, "Invalid HTC ready msg: 0x%x\n", message_id);
  6290. + return -ECOMM;
  6291. + }
  6292. +
  6293. + htc->total_transmit_credits = credit_count;
  6294. + htc->target_credit_size = credit_size;
  6295. +
  6296. +- ath10k_dbg(ATH10K_DBG_HTC,
  6297. ++ ath10k_dbg(ar, ATH10K_DBG_HTC,
  6298. + "Target ready! transmit resources: %d size:%d\n",
  6299. + htc->total_transmit_credits,
  6300. + htc->target_credit_size);
  6301. +
  6302. + if ((htc->total_transmit_credits == 0) ||
  6303. + (htc->target_credit_size == 0)) {
  6304. +- ath10k_err("Invalid credit size received\n");
  6305. ++ ath10k_err(ar, "Invalid credit size received\n");
  6306. + return -ECOMM;
  6307. + }
  6308. +
  6309. +@@ -607,7 +624,8 @@ int ath10k_htc_wait_target(struct ath10k
  6310. + /* connect fake service */
  6311. + status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
  6312. + if (status) {
  6313. +- ath10k_err("could not connect to htc service (%d)\n", status);
  6314. ++ ath10k_err(ar, "could not connect to htc service (%d)\n",
  6315. ++ status);
  6316. + return status;
  6317. + }
  6318. +
  6319. +@@ -618,6 +636,7 @@ int ath10k_htc_connect_service(struct at
  6320. + struct ath10k_htc_svc_conn_req *conn_req,
  6321. + struct ath10k_htc_svc_conn_resp *conn_resp)
  6322. + {
  6323. ++ struct ath10k *ar = htc->ar;
  6324. + struct ath10k_htc_msg *msg;
  6325. + struct ath10k_htc_conn_svc *req_msg;
  6326. + struct ath10k_htc_conn_svc_response resp_msg_dummy;
  6327. +@@ -643,13 +662,13 @@ int ath10k_htc_connect_service(struct at
  6328. + tx_alloc = ath10k_htc_get_credit_allocation(htc,
  6329. + conn_req->service_id);
  6330. + if (!tx_alloc)
  6331. +- ath10k_dbg(ATH10K_DBG_BOOT,
  6332. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT,
  6333. + "boot htc service %s does not allocate target credits\n",
  6334. + htc_service_name(conn_req->service_id));
  6335. +
  6336. + skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
  6337. + if (!skb) {
  6338. +- ath10k_err("Failed to allocate HTC packet\n");
  6339. ++ ath10k_err(ar, "Failed to allocate HTC packet\n");
  6340. + return -ENOMEM;
  6341. + }
  6342. +
  6343. +@@ -684,11 +703,9 @@ int ath10k_htc_connect_service(struct at
  6344. + /* wait for response */
  6345. + status = wait_for_completion_timeout(&htc->ctl_resp,
  6346. + ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
  6347. +- if (status <= 0) {
  6348. +- if (status == 0)
  6349. +- status = -ETIMEDOUT;
  6350. +- ath10k_err("Service connect timeout: %d\n", status);
  6351. +- return status;
  6352. ++ if (status == 0) {
  6353. ++ ath10k_err(ar, "Service connect timeout: %d\n", status);
  6354. ++ return -ETIMEDOUT;
  6355. + }
  6356. +
  6357. + /* we controlled the buffer creation, it's aligned */
  6358. +@@ -700,11 +717,11 @@ int ath10k_htc_connect_service(struct at
  6359. + if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
  6360. + (htc->control_resp_len < sizeof(msg->hdr) +
  6361. + sizeof(msg->connect_service_response))) {
  6362. +- ath10k_err("Invalid resp message ID 0x%x", message_id);
  6363. ++ ath10k_err(ar, "Invalid resp message ID 0x%x", message_id);
  6364. + return -EPROTO;
  6365. + }
  6366. +
  6367. +- ath10k_dbg(ATH10K_DBG_HTC,
  6368. ++ ath10k_dbg(ar, ATH10K_DBG_HTC,
  6369. + "HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n",
  6370. + htc_service_name(service_id),
  6371. + resp_msg->status, resp_msg->eid);
  6372. +@@ -713,7 +730,7 @@ int ath10k_htc_connect_service(struct at
  6373. +
  6374. + /* check response status */
  6375. + if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) {
  6376. +- ath10k_err("HTC Service %s connect request failed: 0x%x)\n",
  6377. ++ ath10k_err(ar, "HTC Service %s connect request failed: 0x%x)\n",
  6378. + htc_service_name(service_id),
  6379. + resp_msg->status);
  6380. + return -EPROTO;
  6381. +@@ -764,18 +781,18 @@ setup:
  6382. + if (status)
  6383. + return status;
  6384. +
  6385. +- ath10k_dbg(ATH10K_DBG_BOOT,
  6386. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT,
  6387. + "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
  6388. + htc_service_name(ep->service_id), ep->ul_pipe_id,
  6389. + ep->dl_pipe_id, ep->eid);
  6390. +
  6391. +- ath10k_dbg(ATH10K_DBG_BOOT,
  6392. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT,
  6393. + "boot htc ep %d ul polled %d dl polled %d\n",
  6394. + ep->eid, ep->ul_is_polled, ep->dl_is_polled);
  6395. +
  6396. + if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
  6397. + ep->tx_credit_flow_enabled = false;
  6398. +- ath10k_dbg(ATH10K_DBG_BOOT,
  6399. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT,
  6400. + "boot htc service '%s' eid %d TX flow control disabled\n",
  6401. + htc_service_name(ep->service_id), assigned_eid);
  6402. + }
  6403. +@@ -783,27 +800,26 @@ setup:
  6404. + return status;
  6405. + }
  6406. +
  6407. +-struct sk_buff *ath10k_htc_alloc_skb(int size)
  6408. ++struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size)
  6409. + {
  6410. + struct sk_buff *skb;
  6411. +
  6412. + skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr));
  6413. +- if (!skb) {
  6414. +- ath10k_warn("could not allocate HTC tx skb\n");
  6415. ++ if (!skb)
  6416. + return NULL;
  6417. +- }
  6418. +
  6419. + skb_reserve(skb, sizeof(struct ath10k_htc_hdr));
  6420. +
  6421. + /* FW/HTC requires 4-byte aligned streams */
  6422. + if (!IS_ALIGNED((unsigned long)skb->data, 4))
  6423. +- ath10k_warn("Unaligned HTC tx skb\n");
  6424. ++ ath10k_warn(ar, "Unaligned HTC tx skb\n");
  6425. +
  6426. + return skb;
  6427. + }
  6428. +
  6429. + int ath10k_htc_start(struct ath10k_htc *htc)
  6430. + {
  6431. ++ struct ath10k *ar = htc->ar;
  6432. + struct sk_buff *skb;
  6433. + int status = 0;
  6434. + struct ath10k_htc_msg *msg;
  6435. +@@ -819,7 +835,7 @@ int ath10k_htc_start(struct ath10k_htc *
  6436. + msg->hdr.message_id =
  6437. + __cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID);
  6438. +
  6439. +- ath10k_dbg(ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");
  6440. ++ ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");
  6441. +
  6442. + status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
  6443. + if (status) {
  6444. +@@ -830,19 +846,6 @@ int ath10k_htc_start(struct ath10k_htc *
  6445. + return 0;
  6446. + }
  6447. +
  6448. +-/*
  6449. +- * stop HTC communications, i.e. stop interrupt reception, and flush all
  6450. +- * queued buffers
  6451. +- */
  6452. +-void ath10k_htc_stop(struct ath10k_htc *htc)
  6453. +-{
  6454. +- spin_lock_bh(&htc->tx_lock);
  6455. +- htc->stopped = true;
  6456. +- spin_unlock_bh(&htc->tx_lock);
  6457. +-
  6458. +- ath10k_hif_stop(htc->ar);
  6459. +-}
  6460. +-
  6461. + /* registered target arrival callback from the HIF layer */
  6462. + int ath10k_htc_init(struct ath10k *ar)
  6463. + {
  6464. +@@ -852,7 +855,6 @@ int ath10k_htc_init(struct ath10k *ar)
  6465. +
  6466. + spin_lock_init(&htc->tx_lock);
  6467. +
  6468. +- htc->stopped = false;
  6469. + ath10k_htc_reset_endpoint_states(htc);
  6470. +
  6471. + /* setup HIF layer callbacks */
  6472. +--- a/drivers/net/wireless/ath/ath10k/htc.h
  6473. ++++ b/drivers/net/wireless/ath/ath10k/htc.h
  6474. +@@ -214,7 +214,6 @@ struct ath10k_htc_frame {
  6475. + struct ath10k_htc_record trailer[0];
  6476. + } __packed __aligned(4);
  6477. +
  6478. +-
  6479. + /*******************/
  6480. + /* Host-side stuff */
  6481. + /*******************/
  6482. +@@ -332,7 +331,7 @@ struct ath10k_htc {
  6483. + struct ath10k *ar;
  6484. + struct ath10k_htc_ep endpoint[ATH10K_HTC_EP_COUNT];
  6485. +
  6486. +- /* protects endpoint and stopped fields */
  6487. ++ /* protects endpoints */
  6488. + spinlock_t tx_lock;
  6489. +
  6490. + struct ath10k_htc_ops htc_ops;
  6491. +@@ -345,8 +344,6 @@ struct ath10k_htc {
  6492. + int total_transmit_credits;
  6493. + struct ath10k_htc_svc_tx_credits service_tx_alloc[ATH10K_HTC_EP_COUNT];
  6494. + int target_credit_size;
  6495. +-
  6496. +- bool stopped;
  6497. + };
  6498. +
  6499. + int ath10k_htc_init(struct ath10k *ar);
  6500. +@@ -357,7 +354,6 @@ int ath10k_htc_connect_service(struct at
  6501. + struct ath10k_htc_svc_conn_resp *conn_resp);
  6502. + int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid,
  6503. + struct sk_buff *packet);
  6504. +-void ath10k_htc_stop(struct ath10k_htc *htc);
  6505. +-struct sk_buff *ath10k_htc_alloc_skb(int size);
  6506. ++struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size);
  6507. +
  6508. + #endif
  6509. +--- a/drivers/net/wireless/ath/ath10k/htt.c
  6510. ++++ b/drivers/net/wireless/ath/ath10k/htt.c
  6511. +@@ -22,7 +22,7 @@
  6512. + #include "core.h"
  6513. + #include "debug.h"
  6514. +
  6515. +-static int ath10k_htt_htc_attach(struct ath10k_htt *htt)
  6516. ++int ath10k_htt_connect(struct ath10k_htt *htt)
  6517. + {
  6518. + struct ath10k_htc_svc_conn_req conn_req;
  6519. + struct ath10k_htc_svc_conn_resp conn_resp;
  6520. +@@ -48,37 +48,11 @@ static int ath10k_htt_htc_attach(struct
  6521. + return 0;
  6522. + }
  6523. +
  6524. +-int ath10k_htt_attach(struct ath10k *ar)
  6525. ++int ath10k_htt_init(struct ath10k *ar)
  6526. + {
  6527. + struct ath10k_htt *htt = &ar->htt;
  6528. +- int ret;
  6529. +
  6530. + htt->ar = ar;
  6531. +- htt->max_throughput_mbps = 800;
  6532. +-
  6533. +- /*
  6534. +- * Connect to HTC service.
  6535. +- * This has to be done before calling ath10k_htt_rx_attach,
  6536. +- * since ath10k_htt_rx_attach involves sending a rx ring configure
  6537. +- * message to the target.
  6538. +- */
  6539. +- ret = ath10k_htt_htc_attach(htt);
  6540. +- if (ret) {
  6541. +- ath10k_err("could not attach htt htc (%d)\n", ret);
  6542. +- goto err_htc_attach;
  6543. +- }
  6544. +-
  6545. +- ret = ath10k_htt_tx_attach(htt);
  6546. +- if (ret) {
  6547. +- ath10k_err("could not attach htt tx (%d)\n", ret);
  6548. +- goto err_htc_attach;
  6549. +- }
  6550. +-
  6551. +- ret = ath10k_htt_rx_attach(htt);
  6552. +- if (ret) {
  6553. +- ath10k_err("could not attach htt rx (%d)\n", ret);
  6554. +- goto err_rx_attach;
  6555. +- }
  6556. +
  6557. + /*
  6558. + * Prefetch enough data to satisfy target
  6559. +@@ -93,23 +67,20 @@ int ath10k_htt_attach(struct ath10k *ar)
  6560. + 2; /* ip4 dscp or ip6 priority */
  6561. +
  6562. + return 0;
  6563. +-
  6564. +-err_rx_attach:
  6565. +- ath10k_htt_tx_detach(htt);
  6566. +-err_htc_attach:
  6567. +- return ret;
  6568. + }
  6569. +
  6570. + #define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ)
  6571. +
  6572. + static int ath10k_htt_verify_version(struct ath10k_htt *htt)
  6573. + {
  6574. +- ath10k_dbg(ATH10K_DBG_BOOT, "htt target version %d.%d\n",
  6575. ++ struct ath10k *ar = htt->ar;
  6576. ++
  6577. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt target version %d.%d\n",
  6578. + htt->target_version_major, htt->target_version_minor);
  6579. +
  6580. + if (htt->target_version_major != 2 &&
  6581. + htt->target_version_major != 3) {
  6582. +- ath10k_err("unsupported htt major version %d. supported versions are 2 and 3\n",
  6583. ++ ath10k_err(ar, "unsupported htt major version %d. supported versions are 2 and 3\n",
  6584. + htt->target_version_major);
  6585. + return -ENOTSUPP;
  6586. + }
  6587. +@@ -117,8 +88,9 @@ static int ath10k_htt_verify_version(str
  6588. + return 0;
  6589. + }
  6590. +
  6591. +-int ath10k_htt_attach_target(struct ath10k_htt *htt)
  6592. ++int ath10k_htt_setup(struct ath10k_htt *htt)
  6593. + {
  6594. ++ struct ath10k *ar = htt->ar;
  6595. + int status;
  6596. +
  6597. + init_completion(&htt->target_version_received);
  6598. +@@ -128,9 +100,9 @@ int ath10k_htt_attach_target(struct ath1
  6599. + return status;
  6600. +
  6601. + status = wait_for_completion_timeout(&htt->target_version_received,
  6602. +- HTT_TARGET_VERSION_TIMEOUT_HZ);
  6603. +- if (status <= 0) {
  6604. +- ath10k_warn("htt version request timed out\n");
  6605. ++ HTT_TARGET_VERSION_TIMEOUT_HZ);
  6606. ++ if (status == 0) {
  6607. ++ ath10k_warn(ar, "htt version request timed out\n");
  6608. + return -ETIMEDOUT;
  6609. + }
  6610. +
  6611. +@@ -140,9 +112,3 @@ int ath10k_htt_attach_target(struct ath1
  6612. +
  6613. + return ath10k_htt_send_rx_ring_cfg_ll(htt);
  6614. + }
  6615. +-
  6616. +-void ath10k_htt_detach(struct ath10k_htt *htt)
  6617. +-{
  6618. +- ath10k_htt_rx_detach(htt);
  6619. +- ath10k_htt_tx_detach(htt);
  6620. +-}
  6621. +--- a/drivers/net/wireless/ath/ath10k/htt.h
  6622. ++++ b/drivers/net/wireless/ath/ath10k/htt.h
  6623. +@@ -21,6 +21,7 @@
  6624. + #include <linux/bug.h>
  6625. + #include <linux/interrupt.h>
  6626. + #include <linux/dmapool.h>
  6627. ++#include <linux/hashtable.h>
  6628. + #include <net/mac80211.h>
  6629. +
  6630. + #include "htc.h"
  6631. +@@ -126,6 +127,7 @@ enum htt_data_tx_ext_tid {
  6632. + * (HL hosts manage queues on the host )
  6633. + * more_in_batch: only for HL hosts. indicates if more packets are
  6634. + * pending. this allows target to wait and aggregate
  6635. ++ * freq: 0 means home channel of given vdev. intended for offchannel
  6636. + */
  6637. + struct htt_data_tx_desc {
  6638. + u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
  6639. +@@ -133,7 +135,8 @@ struct htt_data_tx_desc {
  6640. + __le16 len;
  6641. + __le16 id;
  6642. + __le32 frags_paddr;
  6643. +- __le32 peerid;
  6644. ++ __le16 peerid;
  6645. ++ __le16 freq;
  6646. + u8 prefetch[0]; /* start of frame, for FW classification engine */
  6647. + } __packed;
  6648. +
  6649. +@@ -156,6 +159,9 @@ enum htt_rx_ring_flags {
  6650. + HTT_RX_RING_FLAGS_PHY_DATA_RX = 1 << 15
  6651. + };
  6652. +
  6653. ++#define HTT_RX_RING_SIZE_MIN 128
  6654. ++#define HTT_RX_RING_SIZE_MAX 2048
  6655. ++
  6656. + struct htt_rx_ring_setup_ring {
  6657. + __le32 fw_idx_shadow_reg_paddr;
  6658. + __le32 rx_ring_base_paddr;
  6659. +@@ -240,16 +246,10 @@ struct htt_oob_sync_req {
  6660. + __le16 rsvd0;
  6661. + } __packed;
  6662. +
  6663. +-#define HTT_AGGR_CONF_MAX_NUM_AMSDU_SUBFRAMES_MASK 0x1F
  6664. +-#define HTT_AGGR_CONF_MAX_NUM_AMSDU_SUBFRAMES_LSB 0
  6665. +-
  6666. + struct htt_aggr_conf {
  6667. + u8 max_num_ampdu_subframes;
  6668. +- union {
  6669. +- /* dont use bitfields; undefined behaviour */
  6670. +- u8 flags; /* see %HTT_AGGR_CONF_MAX_NUM_AMSDU_SUBFRAMES_ */
  6671. +- u8 max_num_amsdu_subframes:5;
  6672. +- } __packed;
  6673. ++ /* amsdu_subframes is limited by 0x1F mask */
  6674. ++ u8 max_num_amsdu_subframes;
  6675. + } __packed;
  6676. +
  6677. + #define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32
  6678. +@@ -271,7 +271,6 @@ enum htt_mgmt_tx_status {
  6679. +
  6680. + /*=== target -> host messages ===============================================*/
  6681. +
  6682. +-
  6683. + enum htt_t2h_msg_type {
  6684. + HTT_T2H_MSG_TYPE_VERSION_CONF = 0x0,
  6685. + HTT_T2H_MSG_TYPE_RX_IND = 0x1,
  6686. +@@ -288,7 +287,19 @@ enum htt_t2h_msg_type {
  6687. + HTT_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc,
  6688. + HTT_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
  6689. + HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION = 0xe,
  6690. ++ HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf,
  6691. ++ HTT_T2H_MSG_TYPE_RX_PN_IND = 0x10,
  6692. ++ HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
  6693. ++ HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND = 0x12,
  6694. ++ /* 0x13 reservd */
  6695. ++ HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE = 0x14,
  6696. ++
  6697. ++ /* FIXME: Do not depend on this event id. Numbering of this event id is
  6698. ++ * broken across different firmware revisions and HTT version fails to
  6699. ++ * indicate this.
  6700. ++ */
  6701. + HTT_T2H_MSG_TYPE_TEST,
  6702. ++
  6703. + /* keep this last */
  6704. + HTT_T2H_NUM_MSGS
  6705. + };
  6706. +@@ -657,6 +668,53 @@ struct htt_rx_fragment_indication {
  6707. + #define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_MASK 0x00000FC0
  6708. + #define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_LSB 6
  6709. +
  6710. ++struct htt_rx_pn_ind {
  6711. ++ __le16 peer_id;
  6712. ++ u8 tid;
  6713. ++ u8 seqno_start;
  6714. ++ u8 seqno_end;
  6715. ++ u8 pn_ie_count;
  6716. ++ u8 reserved;
  6717. ++ u8 pn_ies[0];
  6718. ++} __packed;
  6719. ++
  6720. ++struct htt_rx_offload_msdu {
  6721. ++ __le16 msdu_len;
  6722. ++ __le16 peer_id;
  6723. ++ u8 vdev_id;
  6724. ++ u8 tid;
  6725. ++ u8 fw_desc;
  6726. ++ u8 payload[0];
  6727. ++} __packed;
  6728. ++
  6729. ++struct htt_rx_offload_ind {
  6730. ++ u8 reserved;
  6731. ++ __le16 msdu_count;
  6732. ++} __packed;
  6733. ++
  6734. ++struct htt_rx_in_ord_msdu_desc {
  6735. ++ __le32 msdu_paddr;
  6736. ++ __le16 msdu_len;
  6737. ++ u8 fw_desc;
  6738. ++ u8 reserved;
  6739. ++} __packed;
  6740. ++
  6741. ++struct htt_rx_in_ord_ind {
  6742. ++ u8 info;
  6743. ++ __le16 peer_id;
  6744. ++ u8 vdev_id;
  6745. ++ u8 reserved;
  6746. ++ __le16 msdu_count;
  6747. ++ struct htt_rx_in_ord_msdu_desc msdu_descs[0];
  6748. ++} __packed;
  6749. ++
  6750. ++#define HTT_RX_IN_ORD_IND_INFO_TID_MASK 0x0000001f
  6751. ++#define HTT_RX_IN_ORD_IND_INFO_TID_LSB 0
  6752. ++#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK 0x00000020
  6753. ++#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_LSB 5
  6754. ++#define HTT_RX_IN_ORD_IND_INFO_FRAG_MASK 0x00000040
  6755. ++#define HTT_RX_IN_ORD_IND_INFO_FRAG_LSB 6
  6756. ++
  6757. + /*
  6758. + * target -> host test message definition
  6759. + *
  6760. +@@ -732,7 +790,7 @@ static inline u8 *htt_rx_test_get_chars(
  6761. + */
  6762. + struct htt_pktlog_msg {
  6763. + u8 pad[3];
  6764. +- __le32 payload[1 /* or more */];
  6765. ++ u8 payload[0];
  6766. + } __packed;
  6767. +
  6768. + struct htt_dbg_stats_rx_reorder_stats {
  6769. +@@ -1038,6 +1096,7 @@ static inline struct htt_stats_conf_item
  6770. + {
  6771. + return (void *)item + sizeof(*item) + roundup(item->length, 4);
  6772. + }
  6773. ++
  6774. + /*
  6775. + * host -> target FRAG DESCRIPTOR/MSDU_EXT DESC bank
  6776. + *
  6777. +@@ -1151,10 +1210,12 @@ struct htt_resp {
  6778. + struct htt_rx_test rx_test;
  6779. + struct htt_pktlog_msg pktlog_msg;
  6780. + struct htt_stats_conf stats_conf;
  6781. ++ struct htt_rx_pn_ind rx_pn_ind;
  6782. ++ struct htt_rx_offload_ind rx_offload_ind;
  6783. ++ struct htt_rx_in_ord_ind rx_in_ord_ind;
  6784. + };
  6785. + } __packed;
  6786. +
  6787. +-
  6788. + /*** host side structures follow ***/
  6789. +
  6790. + struct htt_tx_done {
  6791. +@@ -1184,7 +1245,6 @@ struct ath10k_htt {
  6792. + struct ath10k *ar;
  6793. + enum ath10k_htc_ep_id eid;
  6794. +
  6795. +- int max_throughput_mbps;
  6796. + u8 target_version_major;
  6797. + u8 target_version_minor;
  6798. + struct completion target_version_received;
  6799. +@@ -1200,6 +1260,20 @@ struct ath10k_htt {
  6800. + * filled.
  6801. + */
  6802. + struct sk_buff **netbufs_ring;
  6803. ++
  6804. ++ /* This is used only with firmware supporting IN_ORD_IND.
  6805. ++ *
  6806. ++ * With Full Rx Reorder the HTT Rx Ring is more of a temporary
  6807. ++ * buffer ring from which buffer addresses are copied by the
  6808. ++ * firmware to MAC Rx ring. Firmware then delivers IN_ORD_IND
  6809. ++ * pointing to specific (re-ordered) buffers.
  6810. ++ *
  6811. ++ * FIXME: With kernel generic hashing functions there's a lot
  6812. ++ * of hash collisions for sk_buffs.
  6813. ++ */
  6814. ++ bool in_ord_rx;
  6815. ++ DECLARE_HASHTABLE(skb_table, 4);
  6816. ++
  6817. + /*
  6818. + * Ring of buffer addresses -
  6819. + * This ring holds the "physical" device address of the
  6820. +@@ -1254,12 +1328,11 @@ struct ath10k_htt {
  6821. +
  6822. + unsigned int prefetch_len;
  6823. +
  6824. +- /* Protects access to %pending_tx, %used_msdu_ids */
  6825. ++ /* Protects access to pending_tx, num_pending_tx */
  6826. + spinlock_t tx_lock;
  6827. + int max_num_pending_tx;
  6828. + int num_pending_tx;
  6829. +- struct sk_buff **pending_tx;
  6830. +- unsigned long *used_msdu_ids; /* bitmap */
  6831. ++ struct idr pending_tx;
  6832. + wait_queue_head_t empty_tx_wq;
  6833. + struct dma_pool *tx_pool;
  6834. +
  6835. +@@ -1273,6 +1346,7 @@ struct ath10k_htt {
  6836. + struct tasklet_struct txrx_compl_task;
  6837. + struct sk_buff_head tx_compl_q;
  6838. + struct sk_buff_head rx_compl_q;
  6839. ++ struct sk_buff_head rx_in_ord_compl_q;
  6840. +
  6841. + /* rx_status template */
  6842. + struct ieee80211_rx_status rx_status;
  6843. +@@ -1328,22 +1402,28 @@ struct htt_rx_desc {
  6844. + #define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */
  6845. + #define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
  6846. +
  6847. +-int ath10k_htt_attach(struct ath10k *ar);
  6848. +-int ath10k_htt_attach_target(struct ath10k_htt *htt);
  6849. +-void ath10k_htt_detach(struct ath10k_htt *htt);
  6850. +-
  6851. +-int ath10k_htt_tx_attach(struct ath10k_htt *htt);
  6852. +-void ath10k_htt_tx_detach(struct ath10k_htt *htt);
  6853. +-int ath10k_htt_rx_attach(struct ath10k_htt *htt);
  6854. +-void ath10k_htt_rx_detach(struct ath10k_htt *htt);
  6855. ++int ath10k_htt_connect(struct ath10k_htt *htt);
  6856. ++int ath10k_htt_init(struct ath10k *ar);
  6857. ++int ath10k_htt_setup(struct ath10k_htt *htt);
  6858. ++
  6859. ++int ath10k_htt_tx_alloc(struct ath10k_htt *htt);
  6860. ++void ath10k_htt_tx_free(struct ath10k_htt *htt);
  6861. ++
  6862. ++int ath10k_htt_rx_alloc(struct ath10k_htt *htt);
  6863. ++int ath10k_htt_rx_ring_refill(struct ath10k *ar);
  6864. ++void ath10k_htt_rx_free(struct ath10k_htt *htt);
  6865. ++
  6866. + void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
  6867. + void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
  6868. + int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
  6869. + int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
  6870. + int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
  6871. ++int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
  6872. ++ u8 max_subfrms_ampdu,
  6873. ++ u8 max_subfrms_amsdu);
  6874. +
  6875. + void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
  6876. +-int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt);
  6877. ++int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
  6878. + void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
  6879. + int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
  6880. + int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *);
  6881. +--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
  6882. ++++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
  6883. +@@ -21,118 +21,84 @@
  6884. + #include "txrx.h"
  6885. + #include "debug.h"
  6886. + #include "trace.h"
  6887. ++#include "mac.h"
  6888. +
  6889. + #include <linux/log2.h>
  6890. +
  6891. +-/* slightly larger than one large A-MPDU */
  6892. +-#define HTT_RX_RING_SIZE_MIN 128
  6893. +-
  6894. +-/* roughly 20 ms @ 1 Gbps of 1500B MSDUs */
  6895. +-#define HTT_RX_RING_SIZE_MAX 2048
  6896. +-
  6897. +-#define HTT_RX_AVG_FRM_BYTES 1000
  6898. +-
  6899. +-/* ms, very conservative */
  6900. +-#define HTT_RX_HOST_LATENCY_MAX_MS 20
  6901. +-
  6902. +-/* ms, conservative */
  6903. +-#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10
  6904. ++#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
  6905. ++#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
  6906. +
  6907. + /* when under memory pressure rx ring refill may fail and needs a retry */
  6908. + #define HTT_RX_RING_REFILL_RETRY_MS 50
  6909. +
  6910. +-
  6911. + static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
  6912. + static void ath10k_htt_txrx_compl_task(unsigned long ptr);
  6913. +
  6914. +-static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
  6915. +-{
  6916. +- int size;
  6917. +-
  6918. +- /*
  6919. +- * It is expected that the host CPU will typically be able to
  6920. +- * service the rx indication from one A-MPDU before the rx
  6921. +- * indication from the subsequent A-MPDU happens, roughly 1-2 ms
  6922. +- * later. However, the rx ring should be sized very conservatively,
  6923. +- * to accomodate the worst reasonable delay before the host CPU
  6924. +- * services a rx indication interrupt.
  6925. +- *
  6926. +- * The rx ring need not be kept full of empty buffers. In theory,
  6927. +- * the htt host SW can dynamically track the low-water mark in the
  6928. +- * rx ring, and dynamically adjust the level to which the rx ring
  6929. +- * is filled with empty buffers, to dynamically meet the desired
  6930. +- * low-water mark.
  6931. +- *
  6932. +- * In contrast, it's difficult to resize the rx ring itself, once
  6933. +- * it's in use. Thus, the ring itself should be sized very
  6934. +- * conservatively, while the degree to which the ring is filled
  6935. +- * with empty buffers should be sized moderately conservatively.
  6936. +- */
  6937. +-
  6938. +- /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
  6939. +- size =
  6940. +- htt->max_throughput_mbps +
  6941. +- 1000 /
  6942. +- (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;
  6943. +-
  6944. +- if (size < HTT_RX_RING_SIZE_MIN)
  6945. +- size = HTT_RX_RING_SIZE_MIN;
  6946. +-
  6947. +- if (size > HTT_RX_RING_SIZE_MAX)
  6948. +- size = HTT_RX_RING_SIZE_MAX;
  6949. +-
  6950. +- size = roundup_pow_of_two(size);
  6951. +-
  6952. +- return size;
  6953. +-}
  6954. +-
  6955. +-static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt)
  6956. ++static struct sk_buff *
  6957. ++ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
  6958. + {
  6959. +- int size;
  6960. ++ struct ath10k_skb_rxcb *rxcb;
  6961. +
  6962. +- /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
  6963. +- size =
  6964. +- htt->max_throughput_mbps *
  6965. +- 1000 /
  6966. +- (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;
  6967. +-
  6968. +- /*
  6969. +- * Make sure the fill level is at least 1 less than the ring size.
  6970. +- * Leaving 1 element empty allows the SW to easily distinguish
  6971. +- * between a full ring vs. an empty ring.
  6972. +- */
  6973. +- if (size >= htt->rx_ring.size)
  6974. +- size = htt->rx_ring.size - 1;
  6975. ++ hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
  6976. ++ if (rxcb->paddr == paddr)
  6977. ++ return ATH10K_RXCB_SKB(rxcb);
  6978. +
  6979. +- return size;
  6980. ++ WARN_ON_ONCE(1);
  6981. ++ return NULL;
  6982. + }
  6983. +
  6984. + static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
  6985. + {
  6986. + struct sk_buff *skb;
  6987. +- struct ath10k_skb_cb *cb;
  6988. ++ struct ath10k_skb_rxcb *rxcb;
  6989. ++ struct hlist_node *n;
  6990. + int i;
  6991. +
  6992. +- for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
  6993. +- skb = htt->rx_ring.netbufs_ring[i];
  6994. +- cb = ATH10K_SKB_CB(skb);
  6995. +- dma_unmap_single(htt->ar->dev, cb->paddr,
  6996. +- skb->len + skb_tailroom(skb),
  6997. +- DMA_FROM_DEVICE);
  6998. +- dev_kfree_skb_any(skb);
  6999. ++ if (htt->rx_ring.in_ord_rx) {
  7000. ++ hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
  7001. ++ skb = ATH10K_RXCB_SKB(rxcb);
  7002. ++ dma_unmap_single(htt->ar->dev, rxcb->paddr,
  7003. ++ skb->len + skb_tailroom(skb),
  7004. ++ DMA_FROM_DEVICE);
  7005. ++ hash_del(&rxcb->hlist);
  7006. ++ dev_kfree_skb_any(skb);
  7007. ++ }
  7008. ++ } else {
  7009. ++ for (i = 0; i < htt->rx_ring.size; i++) {
  7010. ++ skb = htt->rx_ring.netbufs_ring[i];
  7011. ++ if (!skb)
  7012. ++ continue;
  7013. ++
  7014. ++ rxcb = ATH10K_SKB_RXCB(skb);
  7015. ++ dma_unmap_single(htt->ar->dev, rxcb->paddr,
  7016. ++ skb->len + skb_tailroom(skb),
  7017. ++ DMA_FROM_DEVICE);
  7018. ++ dev_kfree_skb_any(skb);
  7019. ++ }
  7020. + }
  7021. +
  7022. + htt->rx_ring.fill_cnt = 0;
  7023. ++ hash_init(htt->rx_ring.skb_table);
  7024. ++ memset(htt->rx_ring.netbufs_ring, 0,
  7025. ++ htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
  7026. + }
  7027. +
  7028. + static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
  7029. + {
  7030. + struct htt_rx_desc *rx_desc;
  7031. ++ struct ath10k_skb_rxcb *rxcb;
  7032. + struct sk_buff *skb;
  7033. + dma_addr_t paddr;
  7034. + int ret = 0, idx;
  7035. +
  7036. +- idx = __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr));
  7037. ++ /* The Full Rx Reorder firmware has no way of telling the host
  7038. ++ * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
  7039. ++ * To keep things simple make sure ring is always half empty. This
  7040. ++ * guarantees there'll be no replenishment overruns possible.
  7041. ++ */
  7042. ++ BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
  7043. ++
  7044. ++ idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
  7045. + while (num > 0) {
  7046. + skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
  7047. + if (!skb) {
  7048. +@@ -159,18 +125,30 @@ static int __ath10k_htt_rx_ring_fill_n(s
  7049. + goto fail;
  7050. + }
  7051. +
  7052. +- ATH10K_SKB_CB(skb)->paddr = paddr;
  7053. ++ rxcb = ATH10K_SKB_RXCB(skb);
  7054. ++ rxcb->paddr = paddr;
  7055. + htt->rx_ring.netbufs_ring[idx] = skb;
  7056. + htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
  7057. + htt->rx_ring.fill_cnt++;
  7058. +
  7059. ++ if (htt->rx_ring.in_ord_rx) {
  7060. ++ hash_add(htt->rx_ring.skb_table,
  7061. ++ &ATH10K_SKB_RXCB(skb)->hlist,
  7062. ++ (u32)paddr);
  7063. ++ }
  7064. ++
  7065. + num--;
  7066. + idx++;
  7067. + idx &= htt->rx_ring.size_mask;
  7068. + }
  7069. +
  7070. + fail:
  7071. +- *(htt->rx_ring.alloc_idx.vaddr) = __cpu_to_le32(idx);
  7072. ++ /*
  7073. ++ * Make sure the rx buffer is updated before available buffer
  7074. ++ * index to avoid any potential rx ring corruption.
  7075. ++ */
  7076. ++ mb();
  7077. ++ *htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
  7078. + return ret;
  7079. + }
  7080. +
  7081. +@@ -198,7 +176,7 @@ static void ath10k_htt_rx_msdu_buff_repl
  7082. + * automatically balances load wrt to CPU power.
  7083. + *
  7084. + * This probably comes at a cost of lower maximum throughput but
  7085. +- * improves the avarage and stability. */
  7086. ++ * improves the average and stability. */
  7087. + spin_lock_bh(&htt->rx_ring.lock);
  7088. + num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
  7089. + num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
  7090. +@@ -222,32 +200,37 @@ static void ath10k_htt_rx_msdu_buff_repl
  7091. + static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
  7092. + {
  7093. + struct ath10k_htt *htt = (struct ath10k_htt *)arg;
  7094. ++
  7095. + ath10k_htt_rx_msdu_buff_replenish(htt);
  7096. + }
  7097. +
  7098. +-void ath10k_htt_rx_detach(struct ath10k_htt *htt)
  7099. ++int ath10k_htt_rx_ring_refill(struct ath10k *ar)
  7100. + {
  7101. +- int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;
  7102. ++ struct ath10k_htt *htt = &ar->htt;
  7103. ++ int ret;
  7104. ++
  7105. ++ spin_lock_bh(&htt->rx_ring.lock);
  7106. ++ ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
  7107. ++ htt->rx_ring.fill_cnt));
  7108. ++ spin_unlock_bh(&htt->rx_ring.lock);
  7109. ++
  7110. ++ if (ret)
  7111. ++ ath10k_htt_rx_ring_free(htt);
  7112. ++
  7113. ++ return ret;
  7114. ++}
  7115. +
  7116. ++void ath10k_htt_rx_free(struct ath10k_htt *htt)
  7117. ++{
  7118. + del_timer_sync(&htt->rx_ring.refill_retry_timer);
  7119. + tasklet_kill(&htt->rx_replenish_task);
  7120. + tasklet_kill(&htt->txrx_compl_task);
  7121. +
  7122. + skb_queue_purge(&htt->tx_compl_q);
  7123. + skb_queue_purge(&htt->rx_compl_q);
  7124. ++ skb_queue_purge(&htt->rx_in_ord_compl_q);
  7125. +
  7126. +- while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
  7127. +- struct sk_buff *skb =
  7128. +- htt->rx_ring.netbufs_ring[sw_rd_idx];
  7129. +- struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
  7130. +-
  7131. +- dma_unmap_single(htt->ar->dev, cb->paddr,
  7132. +- skb->len + skb_tailroom(skb),
  7133. +- DMA_FROM_DEVICE);
  7134. +- dev_kfree_skb_any(htt->rx_ring.netbufs_ring[sw_rd_idx]);
  7135. +- sw_rd_idx++;
  7136. +- sw_rd_idx &= htt->rx_ring.size_mask;
  7137. +- }
  7138. ++ ath10k_htt_rx_ring_free(htt);
  7139. +
  7140. + dma_free_coherent(htt->ar->dev,
  7141. + (htt->rx_ring.size *
  7142. +@@ -265,66 +248,59 @@ void ath10k_htt_rx_detach(struct ath10k_
  7143. +
  7144. + static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
  7145. + {
  7146. ++ struct ath10k *ar = htt->ar;
  7147. + int idx;
  7148. + struct sk_buff *msdu;
  7149. +
  7150. + lockdep_assert_held(&htt->rx_ring.lock);
  7151. +
  7152. + if (htt->rx_ring.fill_cnt == 0) {
  7153. +- ath10k_warn("tried to pop sk_buff from an empty rx ring\n");
  7154. ++ ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
  7155. + return NULL;
  7156. + }
  7157. +
  7158. + idx = htt->rx_ring.sw_rd_idx.msdu_payld;
  7159. + msdu = htt->rx_ring.netbufs_ring[idx];
  7160. ++ htt->rx_ring.netbufs_ring[idx] = NULL;
  7161. ++ htt->rx_ring.paddrs_ring[idx] = 0;
  7162. +
  7163. + idx++;
  7164. + idx &= htt->rx_ring.size_mask;
  7165. + htt->rx_ring.sw_rd_idx.msdu_payld = idx;
  7166. + htt->rx_ring.fill_cnt--;
  7167. +
  7168. +- return msdu;
  7169. +-}
  7170. +-
  7171. +-static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
  7172. +-{
  7173. +- struct sk_buff *next;
  7174. ++ dma_unmap_single(htt->ar->dev,
  7175. ++ ATH10K_SKB_RXCB(msdu)->paddr,
  7176. ++ msdu->len + skb_tailroom(msdu),
  7177. ++ DMA_FROM_DEVICE);
  7178. ++ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
  7179. ++ msdu->data, msdu->len + skb_tailroom(msdu));
  7180. +
  7181. +- while (skb) {
  7182. +- next = skb->next;
  7183. +- dev_kfree_skb_any(skb);
  7184. +- skb = next;
  7185. +- }
  7186. ++ return msdu;
  7187. + }
  7188. +
  7189. + /* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
  7190. + static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
  7191. + u8 **fw_desc, int *fw_desc_len,
  7192. +- struct sk_buff **head_msdu,
  7193. +- struct sk_buff **tail_msdu)
  7194. ++ struct sk_buff_head *amsdu)
  7195. + {
  7196. ++ struct ath10k *ar = htt->ar;
  7197. + int msdu_len, msdu_chaining = 0;
  7198. + struct sk_buff *msdu;
  7199. + struct htt_rx_desc *rx_desc;
  7200. +
  7201. + lockdep_assert_held(&htt->rx_ring.lock);
  7202. +
  7203. +- if (htt->rx_confused) {
  7204. +- ath10k_warn("htt is confused. refusing rx\n");
  7205. +- return -1;
  7206. +- }
  7207. +-
  7208. +- msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
  7209. +- while (msdu) {
  7210. ++ for (;;) {
  7211. + int last_msdu, msdu_len_invalid, msdu_chained;
  7212. +
  7213. +- dma_unmap_single(htt->ar->dev,
  7214. +- ATH10K_SKB_CB(msdu)->paddr,
  7215. +- msdu->len + skb_tailroom(msdu),
  7216. +- DMA_FROM_DEVICE);
  7217. ++ msdu = ath10k_htt_rx_netbuf_pop(htt);
  7218. ++ if (!msdu) {
  7219. ++ __skb_queue_purge(amsdu);
  7220. ++ return -ENOENT;
  7221. ++ }
  7222. +
  7223. +- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx pop: ",
  7224. +- msdu->data, msdu->len + skb_tailroom(msdu));
  7225. ++ __skb_queue_tail(amsdu, msdu);
  7226. +
  7227. + rx_desc = (struct htt_rx_desc *)msdu->data;
  7228. +
  7229. +@@ -343,12 +319,8 @@ static int ath10k_htt_rx_amsdu_pop(struc
  7230. + */
  7231. + if (!(__le32_to_cpu(rx_desc->attention.flags)
  7232. + & RX_ATTENTION_FLAGS_MSDU_DONE)) {
  7233. +- ath10k_htt_rx_free_msdu_chain(*head_msdu);
  7234. +- *head_msdu = NULL;
  7235. +- msdu = NULL;
  7236. +- ath10k_err("htt rx stopped. cannot recover\n");
  7237. +- htt->rx_confused = true;
  7238. +- break;
  7239. ++ __skb_queue_purge(amsdu);
  7240. ++ return -EIO;
  7241. + }
  7242. +
  7243. + /*
  7244. +@@ -399,7 +371,6 @@ static int ath10k_htt_rx_amsdu_pop(struc
  7245. + msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
  7246. + RX_MSDU_START_INFO0_MSDU_LENGTH);
  7247. + msdu_chained = rx_desc->frag_info.ring2_more_count;
  7248. +- msdu_chaining = msdu_chained;
  7249. +
  7250. + if (msdu_len_invalid)
  7251. + msdu_len = 0;
  7252. +@@ -408,42 +379,32 @@ static int ath10k_htt_rx_amsdu_pop(struc
  7253. + skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
  7254. + msdu_len -= msdu->len;
  7255. +
  7256. +- /* FIXME: Do chained buffers include htt_rx_desc or not? */
  7257. ++ /* Note: Chained buffers do not contain rx descriptor */
  7258. + while (msdu_chained--) {
  7259. +- struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
  7260. +-
  7261. +- dma_unmap_single(htt->ar->dev,
  7262. +- ATH10K_SKB_CB(next)->paddr,
  7263. +- next->len + skb_tailroom(next),
  7264. +- DMA_FROM_DEVICE);
  7265. +-
  7266. +- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL,
  7267. +- "htt rx chained: ", next->data,
  7268. +- next->len + skb_tailroom(next));
  7269. +-
  7270. +- skb_trim(next, 0);
  7271. +- skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE));
  7272. +- msdu_len -= next->len;
  7273. ++ msdu = ath10k_htt_rx_netbuf_pop(htt);
  7274. ++ if (!msdu) {
  7275. ++ __skb_queue_purge(amsdu);
  7276. ++ return -ENOENT;
  7277. ++ }
  7278. +
  7279. +- msdu->next = next;
  7280. +- msdu = next;
  7281. ++ __skb_queue_tail(amsdu, msdu);
  7282. ++ skb_trim(msdu, 0);
  7283. ++ skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
  7284. ++ msdu_len -= msdu->len;
  7285. ++ msdu_chaining = 1;
  7286. + }
  7287. +
  7288. + last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
  7289. + RX_MSDU_END_INFO0_LAST_MSDU;
  7290. +
  7291. +- if (last_msdu) {
  7292. +- msdu->next = NULL;
  7293. ++ trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
  7294. ++ sizeof(*rx_desc) - sizeof(u32));
  7295. ++
  7296. ++ if (last_msdu)
  7297. + break;
  7298. +- } else {
  7299. +- struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
  7300. +- msdu->next = next;
  7301. +- msdu = next;
  7302. +- }
  7303. + }
  7304. +- *tail_msdu = msdu;
  7305. +
  7306. +- if (*head_msdu == NULL)
  7307. ++ if (skb_queue_empty(amsdu))
  7308. + msdu_chaining = -1;
  7309. +
  7310. + /*
  7311. +@@ -465,43 +426,117 @@ static int ath10k_htt_rx_amsdu_pop(struc
  7312. + static void ath10k_htt_rx_replenish_task(unsigned long ptr)
  7313. + {
  7314. + struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
  7315. ++
  7316. + ath10k_htt_rx_msdu_buff_replenish(htt);
  7317. + }
  7318. +
  7319. +-int ath10k_htt_rx_attach(struct ath10k_htt *htt)
  7320. ++static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
  7321. ++ u32 paddr)
  7322. ++{
  7323. ++ struct ath10k *ar = htt->ar;
  7324. ++ struct ath10k_skb_rxcb *rxcb;
  7325. ++ struct sk_buff *msdu;
  7326. ++
  7327. ++ lockdep_assert_held(&htt->rx_ring.lock);
  7328. ++
  7329. ++ msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
  7330. ++ if (!msdu)
  7331. ++ return NULL;
  7332. ++
  7333. ++ rxcb = ATH10K_SKB_RXCB(msdu);
  7334. ++ hash_del(&rxcb->hlist);
  7335. ++ htt->rx_ring.fill_cnt--;
  7336. ++
  7337. ++ dma_unmap_single(htt->ar->dev, rxcb->paddr,
  7338. ++ msdu->len + skb_tailroom(msdu),
  7339. ++ DMA_FROM_DEVICE);
  7340. ++ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
  7341. ++ msdu->data, msdu->len + skb_tailroom(msdu));
  7342. ++
  7343. ++ return msdu;
  7344. ++}
  7345. ++
  7346. ++static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
  7347. ++ struct htt_rx_in_ord_ind *ev,
  7348. ++ struct sk_buff_head *list)
  7349. + {
  7350. ++ struct ath10k *ar = htt->ar;
  7351. ++ struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs;
  7352. ++ struct htt_rx_desc *rxd;
  7353. ++ struct sk_buff *msdu;
  7354. ++ int msdu_count;
  7355. ++ bool is_offload;
  7356. ++ u32 paddr;
  7357. ++
  7358. ++ lockdep_assert_held(&htt->rx_ring.lock);
  7359. ++
  7360. ++ msdu_count = __le16_to_cpu(ev->msdu_count);
  7361. ++ is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
  7362. ++
  7363. ++ while (msdu_count--) {
  7364. ++ paddr = __le32_to_cpu(msdu_desc->msdu_paddr);
  7365. ++
  7366. ++ msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
  7367. ++ if (!msdu) {
  7368. ++ __skb_queue_purge(list);
  7369. ++ return -ENOENT;
  7370. ++ }
  7371. ++
  7372. ++ __skb_queue_tail(list, msdu);
  7373. ++
  7374. ++ if (!is_offload) {
  7375. ++ rxd = (void *)msdu->data;
  7376. ++
  7377. ++ trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
  7378. ++
  7379. ++ skb_put(msdu, sizeof(*rxd));
  7380. ++ skb_pull(msdu, sizeof(*rxd));
  7381. ++ skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
  7382. ++
  7383. ++ if (!(__le32_to_cpu(rxd->attention.flags) &
  7384. ++ RX_ATTENTION_FLAGS_MSDU_DONE)) {
  7385. ++ ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
  7386. ++ return -EIO;
  7387. ++ }
  7388. ++ }
  7389. ++
  7390. ++ msdu_desc++;
  7391. ++ }
  7392. ++
  7393. ++ return 0;
  7394. ++}
  7395. ++
  7396. ++int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
  7397. ++{
  7398. ++ struct ath10k *ar = htt->ar;
  7399. + dma_addr_t paddr;
  7400. + void *vaddr;
  7401. ++ size_t size;
  7402. + struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
  7403. +
  7404. +- htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
  7405. +- if (!is_power_of_2(htt->rx_ring.size)) {
  7406. +- ath10k_warn("htt rx ring size is not power of 2\n");
  7407. +- return -EINVAL;
  7408. +- }
  7409. ++ htt->rx_confused = false;
  7410. +
  7411. ++ /* XXX: The fill level could be changed during runtime in response to
  7412. ++ * the host processing latency. Is this really worth it?
  7413. ++ */
  7414. ++ htt->rx_ring.size = HTT_RX_RING_SIZE;
  7415. + htt->rx_ring.size_mask = htt->rx_ring.size - 1;
  7416. ++ htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL;
  7417. +
  7418. +- /*
  7419. +- * Set the initial value for the level to which the rx ring
  7420. +- * should be filled, based on the max throughput and the
  7421. +- * worst likely latency for the host to fill the rx ring
  7422. +- * with new buffers. In theory, this fill level can be
  7423. +- * dynamically adjusted from the initial value set here, to
  7424. +- * reflect the actual host latency rather than a
  7425. +- * conservative assumption about the host latency.
  7426. +- */
  7427. +- htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);
  7428. ++ if (!is_power_of_2(htt->rx_ring.size)) {
  7429. ++ ath10k_warn(ar, "htt rx ring size is not power of 2\n");
  7430. ++ return -EINVAL;
  7431. ++ }
  7432. +
  7433. + htt->rx_ring.netbufs_ring =
  7434. +- kmalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
  7435. ++ kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
  7436. + GFP_KERNEL);
  7437. + if (!htt->rx_ring.netbufs_ring)
  7438. + goto err_netbuf;
  7439. +
  7440. +- vaddr = dma_alloc_coherent(htt->ar->dev,
  7441. +- (htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring)),
  7442. +- &paddr, GFP_DMA);
  7443. ++ size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);
  7444. ++
  7445. ++ vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_DMA);
  7446. + if (!vaddr)
  7447. + goto err_dma_ring;
  7448. +
  7449. +@@ -516,7 +551,7 @@ int ath10k_htt_rx_attach(struct ath10k_h
  7450. +
  7451. + htt->rx_ring.alloc_idx.vaddr = vaddr;
  7452. + htt->rx_ring.alloc_idx.paddr = paddr;
  7453. +- htt->rx_ring.sw_rd_idx.msdu_payld = 0;
  7454. ++ htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
  7455. + *htt->rx_ring.alloc_idx.vaddr = 0;
  7456. +
  7457. + /* Initialize the Rx refill retry timer */
  7458. +@@ -525,28 +560,23 @@ int ath10k_htt_rx_attach(struct ath10k_h
  7459. + spin_lock_init(&htt->rx_ring.lock);
  7460. +
  7461. + htt->rx_ring.fill_cnt = 0;
  7462. +- if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
  7463. +- goto err_fill_ring;
  7464. ++ htt->rx_ring.sw_rd_idx.msdu_payld = 0;
  7465. ++ hash_init(htt->rx_ring.skb_table);
  7466. +
  7467. + tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
  7468. + (unsigned long)htt);
  7469. +
  7470. + skb_queue_head_init(&htt->tx_compl_q);
  7471. + skb_queue_head_init(&htt->rx_compl_q);
  7472. ++ skb_queue_head_init(&htt->rx_in_ord_compl_q);
  7473. +
  7474. + tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
  7475. + (unsigned long)htt);
  7476. +
  7477. +- ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
  7478. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
  7479. + htt->rx_ring.size, htt->rx_ring.fill_level);
  7480. + return 0;
  7481. +
  7482. +-err_fill_ring:
  7483. +- ath10k_htt_rx_ring_free(htt);
  7484. +- dma_free_coherent(htt->ar->dev,
  7485. +- sizeof(*htt->rx_ring.alloc_idx.vaddr),
  7486. +- htt->rx_ring.alloc_idx.vaddr,
  7487. +- htt->rx_ring.alloc_idx.paddr);
  7488. + err_dma_idx:
  7489. + dma_free_coherent(htt->ar->dev,
  7490. + (htt->rx_ring.size *
  7491. +@@ -559,73 +589,54 @@ err_netbuf:
  7492. + return -ENOMEM;
  7493. + }
  7494. +
  7495. +-static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type)
  7496. ++static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
  7497. ++ enum htt_rx_mpdu_encrypt_type type)
  7498. + {
  7499. + switch (type) {
  7500. ++ case HTT_RX_MPDU_ENCRYPT_NONE:
  7501. ++ return 0;
  7502. + case HTT_RX_MPDU_ENCRYPT_WEP40:
  7503. + case HTT_RX_MPDU_ENCRYPT_WEP104:
  7504. +- return 4;
  7505. ++ return IEEE80211_WEP_IV_LEN;
  7506. + case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
  7507. +- case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */
  7508. + case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
  7509. +- case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */
  7510. ++ return IEEE80211_TKIP_IV_LEN;
  7511. + case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
  7512. +- return 8;
  7513. +- case HTT_RX_MPDU_ENCRYPT_NONE:
  7514. +- return 0;
  7515. ++ return IEEE80211_CCMP_HDR_LEN;
  7516. ++ case HTT_RX_MPDU_ENCRYPT_WEP128:
  7517. ++ case HTT_RX_MPDU_ENCRYPT_WAPI:
  7518. ++ break;
  7519. + }
  7520. +
  7521. +- ath10k_warn("unknown encryption type %d\n", type);
  7522. ++ ath10k_warn(ar, "unsupported encryption type %d\n", type);
  7523. + return 0;
  7524. + }
  7525. +
  7526. +-static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type)
  7527. ++#define MICHAEL_MIC_LEN 8
  7528. ++
  7529. ++static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
  7530. ++ enum htt_rx_mpdu_encrypt_type type)
  7531. + {
  7532. + switch (type) {
  7533. + case HTT_RX_MPDU_ENCRYPT_NONE:
  7534. ++ return 0;
  7535. + case HTT_RX_MPDU_ENCRYPT_WEP40:
  7536. + case HTT_RX_MPDU_ENCRYPT_WEP104:
  7537. +- case HTT_RX_MPDU_ENCRYPT_WEP128:
  7538. +- case HTT_RX_MPDU_ENCRYPT_WAPI:
  7539. +- return 0;
  7540. ++ return IEEE80211_WEP_ICV_LEN;
  7541. + case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
  7542. + case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
  7543. +- return 4;
  7544. ++ return IEEE80211_TKIP_ICV_LEN;
  7545. + case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
  7546. +- return 8;
  7547. ++ return IEEE80211_CCMP_MIC_LEN;
  7548. ++ case HTT_RX_MPDU_ENCRYPT_WEP128:
  7549. ++ case HTT_RX_MPDU_ENCRYPT_WAPI:
  7550. ++ break;
  7551. + }
  7552. +
  7553. +- ath10k_warn("unknown encryption type %d\n", type);
  7554. ++ ath10k_warn(ar, "unsupported encryption type %d\n", type);
  7555. + return 0;
  7556. + }
  7557. +
  7558. +-/* Applies for first msdu in chain, before altering it. */
  7559. +-static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb)
  7560. +-{
  7561. +- struct htt_rx_desc *rxd;
  7562. +- enum rx_msdu_decap_format fmt;
  7563. +-
  7564. +- rxd = (void *)skb->data - sizeof(*rxd);
  7565. +- fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
  7566. +- RX_MSDU_START_INFO1_DECAP_FORMAT);
  7567. +-
  7568. +- if (fmt == RX_MSDU_DECAP_RAW)
  7569. +- return (void *)skb->data;
  7570. +- else
  7571. +- return (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
  7572. +-}
  7573. +-
  7574. +-/* This function only applies for first msdu in an msdu chain */
  7575. +-static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
  7576. +-{
  7577. +- if (ieee80211_is_data_qos(hdr->frame_control)) {
  7578. +- u8 *qc = ieee80211_get_qos_ctl(hdr);
  7579. +- if (qc[0] & 0x80)
  7580. +- return true;
  7581. +- }
  7582. +- return false;
  7583. +-}
  7584. +-
  7585. + struct rfc1042_hdr {
  7586. + u8 llc_dsap;
  7587. + u8 llc_ssap;
  7588. +@@ -660,23 +671,34 @@ static const u8 rx_legacy_rate_idx[] = {
  7589. + };
  7590. +
  7591. + static void ath10k_htt_rx_h_rates(struct ath10k *ar,
  7592. +- enum ieee80211_band band,
  7593. +- u8 info0, u32 info1, u32 info2,
  7594. +- struct ieee80211_rx_status *status)
  7595. ++ struct ieee80211_rx_status *status,
  7596. ++ struct htt_rx_desc *rxd)
  7597. + {
  7598. ++ enum ieee80211_band band;
  7599. + u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
  7600. + u8 preamble = 0;
  7601. ++ u32 info1, info2, info3;
  7602. +
  7603. +- /* Check if valid fields */
  7604. +- if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
  7605. ++ /* Band value can't be set as undefined but freq can be 0 - use that to
  7606. ++ * determine whether band is provided.
  7607. ++ *
  7608. ++ * FIXME: Perhaps this can go away if CCK rate reporting is a little
  7609. ++ * reworked?
  7610. ++ */
  7611. ++ if (!status->freq)
  7612. + return;
  7613. +
  7614. +- preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);
  7615. ++ band = status->band;
  7616. ++ info1 = __le32_to_cpu(rxd->ppdu_start.info1);
  7617. ++ info2 = __le32_to_cpu(rxd->ppdu_start.info2);
  7618. ++ info3 = __le32_to_cpu(rxd->ppdu_start.info3);
  7619. ++
  7620. ++ preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);
  7621. +
  7622. + switch (preamble) {
  7623. + case HTT_RX_LEGACY:
  7624. +- cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
  7625. +- rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
  7626. ++ cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
  7627. ++ rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
  7628. + rate_idx = 0;
  7629. +
  7630. + if (rate < 0x08 || rate > 0x0F)
  7631. +@@ -703,11 +725,11 @@ static void ath10k_htt_rx_h_rates(struct
  7632. + break;
  7633. + case HTT_RX_HT:
  7634. + case HTT_RX_HT_WITH_TXBF:
  7635. +- /* HT-SIG - Table 20-11 in info1 and info2 */
  7636. +- mcs = info1 & 0x1F;
  7637. ++ /* HT-SIG - Table 20-11 in info2 and info3 */
  7638. ++ mcs = info2 & 0x1F;
  7639. + nss = mcs >> 3;
  7640. +- bw = (info1 >> 7) & 1;
  7641. +- sgi = (info2 >> 7) & 1;
  7642. ++ bw = (info2 >> 7) & 1;
  7643. ++ sgi = (info3 >> 7) & 1;
  7644. +
  7645. + status->rate_idx = mcs;
  7646. + status->flag |= RX_FLAG_HT;
  7647. +@@ -718,12 +740,12 @@ static void ath10k_htt_rx_h_rates(struct
  7648. + break;
  7649. + case HTT_RX_VHT:
  7650. + case HTT_RX_VHT_WITH_TXBF:
  7651. +- /* VHT-SIG-A1 in info 1, VHT-SIG-A2 in info2
  7652. ++ /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
  7653. + TODO check this */
  7654. +- mcs = (info2 >> 4) & 0x0F;
  7655. +- nss = ((info1 >> 10) & 0x07) + 1;
  7656. +- bw = info1 & 3;
  7657. +- sgi = info2 & 1;
  7658. ++ mcs = (info3 >> 4) & 0x0F;
  7659. ++ nss = ((info2 >> 10) & 0x07) + 1;
  7660. ++ bw = info2 & 3;
  7661. ++ sgi = info3 & 1;
  7662. +
  7663. + status->rate_idx = mcs;
  7664. + status->vht_nss = nss;
  7665. +@@ -751,28 +773,6 @@ static void ath10k_htt_rx_h_rates(struct
  7666. + }
  7667. + }
  7668. +
  7669. +-static void ath10k_htt_rx_h_protected(struct ath10k_htt *htt,
  7670. +- struct ieee80211_rx_status *rx_status,
  7671. +- struct sk_buff *skb,
  7672. +- enum htt_rx_mpdu_encrypt_type enctype)
  7673. +-{
  7674. +- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
  7675. +-
  7676. +-
  7677. +- if (enctype == HTT_RX_MPDU_ENCRYPT_NONE) {
  7678. +- rx_status->flag &= ~(RX_FLAG_DECRYPTED |
  7679. +- RX_FLAG_IV_STRIPPED |
  7680. +- RX_FLAG_MMIC_STRIPPED);
  7681. +- return;
  7682. +- }
  7683. +-
  7684. +- rx_status->flag |= RX_FLAG_DECRYPTED |
  7685. +- RX_FLAG_IV_STRIPPED |
  7686. +- RX_FLAG_MMIC_STRIPPED;
  7687. +- hdr->frame_control = __cpu_to_le16(__le16_to_cpu(hdr->frame_control) &
  7688. +- ~IEEE80211_FCTL_PROTECTED);
  7689. +-}
  7690. +-
  7691. + static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
  7692. + struct ieee80211_rx_status *status)
  7693. + {
  7694. +@@ -793,19 +793,121 @@ static bool ath10k_htt_rx_h_channel(stru
  7695. + return true;
  7696. + }
  7697. +
  7698. ++static void ath10k_htt_rx_h_signal(struct ath10k *ar,
  7699. ++ struct ieee80211_rx_status *status,
  7700. ++ struct htt_rx_desc *rxd)
  7701. ++{
  7702. ++ /* FIXME: Get real NF */
  7703. ++ status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
  7704. ++ rxd->ppdu_start.rssi_comb;
  7705. ++ status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
  7706. ++}
  7707. ++
  7708. ++static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
  7709. ++ struct ieee80211_rx_status *status,
  7710. ++ struct htt_rx_desc *rxd)
  7711. ++{
  7712. ++ /* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
  7713. ++ * means all prior MSDUs in a PPDU are reported to mac80211 without the
  7714. ++ * TSF. Is it worth holding frames until end of PPDU is known?
  7715. ++ *
  7716. ++ * FIXME: Can we get/compute 64bit TSF?
  7717. ++ */
  7718. ++ status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
  7719. ++ status->flag |= RX_FLAG_MACTIME_END;
  7720. ++}
  7721. ++
  7722. ++static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
  7723. ++ struct sk_buff_head *amsdu,
  7724. ++ struct ieee80211_rx_status *status)
  7725. ++{
  7726. ++ struct sk_buff *first;
  7727. ++ struct htt_rx_desc *rxd;
  7728. ++ bool is_first_ppdu;
  7729. ++ bool is_last_ppdu;
  7730. ++
  7731. ++ if (skb_queue_empty(amsdu))
  7732. ++ return;
  7733. ++
  7734. ++ first = skb_peek(amsdu);
  7735. ++ rxd = (void *)first->data - sizeof(*rxd);
  7736. ++
  7737. ++ is_first_ppdu = !!(rxd->attention.flags &
  7738. ++ __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
  7739. ++ is_last_ppdu = !!(rxd->attention.flags &
  7740. ++ __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));
  7741. ++
  7742. ++ if (is_first_ppdu) {
  7743. ++ /* New PPDU starts so clear out the old per-PPDU status. */
  7744. ++ status->freq = 0;
  7745. ++ status->rate_idx = 0;
  7746. ++ status->vht_nss = 0;
  7747. ++ status->vht_flag &= ~RX_VHT_FLAG_80MHZ;
  7748. ++ status->flag &= ~(RX_FLAG_HT |
  7749. ++ RX_FLAG_VHT |
  7750. ++ RX_FLAG_SHORT_GI |
  7751. ++ RX_FLAG_40MHZ |
  7752. ++ RX_FLAG_MACTIME_END);
  7753. ++ status->flag |= RX_FLAG_NO_SIGNAL_VAL;
  7754. ++
  7755. ++ ath10k_htt_rx_h_signal(ar, status, rxd);
  7756. ++ ath10k_htt_rx_h_channel(ar, status);
  7757. ++ ath10k_htt_rx_h_rates(ar, status, rxd);
  7758. ++ }
  7759. ++
  7760. ++ if (is_last_ppdu)
  7761. ++ ath10k_htt_rx_h_mactime(ar, status, rxd);
  7762. ++}
  7763. ++
  7764. ++static const char * const tid_to_ac[] = {
  7765. ++ "BE",
  7766. ++ "BK",
  7767. ++ "BK",
  7768. ++ "BE",
  7769. ++ "VI",
  7770. ++ "VI",
  7771. ++ "VO",
  7772. ++ "VO",
  7773. ++};
  7774. ++
  7775. ++static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
  7776. ++{
  7777. ++ u8 *qc;
  7778. ++ int tid;
  7779. ++
  7780. ++ if (!ieee80211_is_data_qos(hdr->frame_control))
  7781. ++ return "";
  7782. ++
  7783. ++ qc = ieee80211_get_qos_ctl(hdr);
  7784. ++ tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
  7785. ++ if (tid < 8)
  7786. ++ snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
  7787. ++ else
  7788. ++ snprintf(out, size, "tid %d", tid);
  7789. ++
  7790. ++ return out;
  7791. ++}
  7792. ++
  7793. + static void ath10k_process_rx(struct ath10k *ar,
  7794. + struct ieee80211_rx_status *rx_status,
  7795. + struct sk_buff *skb)
  7796. + {
  7797. + struct ieee80211_rx_status *status;
  7798. ++ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
  7799. ++ char tid[32];
  7800. +
  7801. + status = IEEE80211_SKB_RXCB(skb);
  7802. + *status = *rx_status;
  7803. +
  7804. +- ath10k_dbg(ATH10K_DBG_DATA,
  7805. +- "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %imic-err %i\n",
  7806. ++ ath10k_dbg(ar, ATH10K_DBG_DATA,
  7807. ++ "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
  7808. + skb,
  7809. + skb->len,
  7810. ++ ieee80211_get_SA(hdr),
  7811. ++ ath10k_get_tid(hdr, tid, sizeof(tid)),
  7812. ++ is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
  7813. ++ "mcast" : "ucast",
  7814. ++ (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
  7815. + status->flag == 0 ? "legacy" : "",
  7816. + status->flag & RX_FLAG_HT ? "ht" : "",
  7817. + status->flag & RX_FLAG_VHT ? "vht" : "",
  7818. +@@ -817,9 +919,12 @@ static void ath10k_process_rx(struct ath
  7819. + status->freq,
  7820. + status->band, status->flag,
  7821. + !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
  7822. +- !!(status->flag & RX_FLAG_MMIC_ERROR));
  7823. +- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
  7824. ++ !!(status->flag & RX_FLAG_MMIC_ERROR),
  7825. ++ !!(status->flag & RX_FLAG_AMSDU_MORE));
  7826. ++ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
  7827. + skb->data, skb->len);
  7828. ++ trace_ath10k_rx_hdr(ar, skb->data, skb->len);
  7829. ++ trace_ath10k_rx_payload(ar, skb->data, skb->len);
  7830. +
  7831. + ieee80211_rx(ar->hw, skb);
  7832. + }
  7833. +@@ -830,179 +935,263 @@ static int ath10k_htt_rx_nwifi_hdrlen(st
  7834. + return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
  7835. + }
  7836. +
  7837. +-static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
  7838. +- struct ieee80211_rx_status *rx_status,
  7839. +- struct sk_buff *skb_in)
  7840. ++static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
  7841. ++ struct sk_buff *msdu,
  7842. ++ struct ieee80211_rx_status *status,
  7843. ++ enum htt_rx_mpdu_encrypt_type enctype,
  7844. ++ bool is_decrypted)
  7845. + {
  7846. ++ struct ieee80211_hdr *hdr;
  7847. + struct htt_rx_desc *rxd;
  7848. +- struct sk_buff *skb = skb_in;
  7849. +- struct sk_buff *first;
  7850. +- enum rx_msdu_decap_format fmt;
  7851. +- enum htt_rx_mpdu_encrypt_type enctype;
  7852. ++ size_t hdr_len;
  7853. ++ size_t crypto_len;
  7854. ++ bool is_first;
  7855. ++ bool is_last;
  7856. ++
  7857. ++ rxd = (void *)msdu->data - sizeof(*rxd);
  7858. ++ is_first = !!(rxd->msdu_end.info0 &
  7859. ++ __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
  7860. ++ is_last = !!(rxd->msdu_end.info0 &
  7861. ++ __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
  7862. ++
  7863. ++ /* Delivered decapped frame:
  7864. ++ * [802.11 header]
  7865. ++ * [crypto param] <-- can be trimmed if !fcs_err &&
  7866. ++ * !decrypt_err && !peer_idx_invalid
  7867. ++ * [amsdu header] <-- only if A-MSDU
  7868. ++ * [rfc1042/llc]
  7869. ++ * [payload]
  7870. ++ * [FCS] <-- at end, needs to be trimmed
  7871. ++ */
  7872. ++
  7873. ++ /* This probably shouldn't happen but warn just in case */
  7874. ++ if (unlikely(WARN_ON_ONCE(!is_first)))
  7875. ++ return;
  7876. ++
  7877. ++ /* This probably shouldn't happen but warn just in case */
  7878. ++ if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
  7879. ++ return;
  7880. ++
  7881. ++ skb_trim(msdu, msdu->len - FCS_LEN);
  7882. ++
  7883. ++ /* In most cases this will be true for sniffed frames. It makes sense
  7884. ++ * to deliver them as-is without stripping the crypto param. This would
  7885. ++ * also make sense for software based decryption (which is not
  7886. ++ * implemented in ath10k).
  7887. ++ *
  7888. ++ * If there's no error then the frame is decrypted. At least that is
  7889. ++ * the case for frames that come in via fragmented rx indication.
  7890. ++ */
  7891. ++ if (!is_decrypted)
  7892. ++ return;
  7893. ++
  7894. ++ /* The payload is decrypted so strip crypto params. Start from tail
  7895. ++ * since hdr is used to compute some stuff.
  7896. ++ */
  7897. ++
  7898. ++ hdr = (void *)msdu->data;
  7899. ++
  7900. ++ /* Tail */
  7901. ++ skb_trim(msdu, msdu->len - ath10k_htt_rx_crypto_tail_len(ar, enctype));
  7902. ++
  7903. ++ /* MMIC */
  7904. ++ if (!ieee80211_has_morefrags(hdr->frame_control) &&
  7905. ++ enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
  7906. ++ skb_trim(msdu, msdu->len - 8);
  7907. ++
  7908. ++ /* Head */
  7909. ++ hdr_len = ieee80211_hdrlen(hdr->frame_control);
  7910. ++ crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
  7911. ++
  7912. ++ memmove((void *)msdu->data + crypto_len,
  7913. ++ (void *)msdu->data, hdr_len);
  7914. ++ skb_pull(msdu, crypto_len);
  7915. ++}
  7916. ++
  7917. ++static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
  7918. ++ struct sk_buff *msdu,
  7919. ++ struct ieee80211_rx_status *status,
  7920. ++ const u8 first_hdr[64])
  7921. ++{
  7922. + struct ieee80211_hdr *hdr;
  7923. +- u8 hdr_buf[64], addr[ETH_ALEN], *qos;
  7924. +- unsigned int hdr_len;
  7925. ++ size_t hdr_len;
  7926. ++ u8 da[ETH_ALEN];
  7927. ++ u8 sa[ETH_ALEN];
  7928. ++
  7929. ++ /* Delivered decapped frame:
  7930. ++ * [nwifi 802.11 header] <-- replaced with 802.11 hdr
  7931. ++ * [rfc1042/llc]
  7932. ++ *
  7933. ++ * Note: The nwifi header doesn't have QoS Control and is
  7934. ++ * (always?) a 3addr frame.
  7935. ++ *
  7936. ++ * Note2: There's no A-MSDU subframe header. Even if it's part
  7937. ++ * of an A-MSDU.
  7938. ++ */
  7939. +
  7940. +- rxd = (void *)skb->data - sizeof(*rxd);
  7941. +- enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
  7942. +- RX_MPDU_START_INFO0_ENCRYPT_TYPE);
  7943. ++ /* pull decapped header and copy SA & DA */
  7944. ++ hdr = (struct ieee80211_hdr *)msdu->data;
  7945. ++ hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
  7946. ++ ether_addr_copy(da, ieee80211_get_DA(hdr));
  7947. ++ ether_addr_copy(sa, ieee80211_get_SA(hdr));
  7948. ++ skb_pull(msdu, hdr_len);
  7949. +
  7950. +- hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
  7951. ++ /* push original 802.11 header */
  7952. ++ hdr = (struct ieee80211_hdr *)first_hdr;
  7953. + hdr_len = ieee80211_hdrlen(hdr->frame_control);
  7954. +- memcpy(hdr_buf, hdr, hdr_len);
  7955. +- hdr = (struct ieee80211_hdr *)hdr_buf;
  7956. ++ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
  7957. +
  7958. +- first = skb;
  7959. +- while (skb) {
  7960. +- void *decap_hdr;
  7961. +- int len;
  7962. +-
  7963. +- rxd = (void *)skb->data - sizeof(*rxd);
  7964. +- fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
  7965. +- RX_MSDU_START_INFO1_DECAP_FORMAT);
  7966. +- decap_hdr = (void *)rxd->rx_hdr_status;
  7967. +-
  7968. +- skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
  7969. +-
  7970. +- /* First frame in an A-MSDU chain has more decapped data. */
  7971. +- if (skb == first) {
  7972. +- len = round_up(ieee80211_hdrlen(hdr->frame_control), 4);
  7973. +- len += round_up(ath10k_htt_rx_crypto_param_len(enctype),
  7974. +- 4);
  7975. +- decap_hdr += len;
  7976. +- }
  7977. ++ /* original 802.11 header has a different DA and in
  7978. ++ * case of 4addr it may also have different SA
  7979. ++ */
  7980. ++ hdr = (struct ieee80211_hdr *)msdu->data;
  7981. ++ ether_addr_copy(ieee80211_get_DA(hdr), da);
  7982. ++ ether_addr_copy(ieee80211_get_SA(hdr), sa);
  7983. ++}
  7984. +
  7985. +- switch (fmt) {
  7986. +- case RX_MSDU_DECAP_RAW:
  7987. +- /* remove trailing FCS */
  7988. +- skb_trim(skb, skb->len - FCS_LEN);
  7989. +- break;
  7990. +- case RX_MSDU_DECAP_NATIVE_WIFI:
  7991. +- /* pull decapped header and copy DA */
  7992. +- hdr = (struct ieee80211_hdr *)skb->data;
  7993. +- hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
  7994. +- memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN);
  7995. +- skb_pull(skb, hdr_len);
  7996. +-
  7997. +- /* push original 802.11 header */
  7998. +- hdr = (struct ieee80211_hdr *)hdr_buf;
  7999. +- hdr_len = ieee80211_hdrlen(hdr->frame_control);
  8000. +- memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
  8001. +-
  8002. +- /* original A-MSDU header has the bit set but we're
  8003. +- * not including A-MSDU subframe header */
  8004. +- hdr = (struct ieee80211_hdr *)skb->data;
  8005. +- qos = ieee80211_get_qos_ctl(hdr);
  8006. +- qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
  8007. ++static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
  8008. ++ struct sk_buff *msdu,
  8009. ++ enum htt_rx_mpdu_encrypt_type enctype)
  8010. ++{
  8011. ++ struct ieee80211_hdr *hdr;
  8012. ++ struct htt_rx_desc *rxd;
  8013. ++ size_t hdr_len, crypto_len;
  8014. ++ void *rfc1042;
  8015. ++ bool is_first, is_last, is_amsdu;
  8016. +
  8017. +- /* original 802.11 header has a different DA */
  8018. +- memcpy(ieee80211_get_DA(hdr), addr, ETH_ALEN);
  8019. +- break;
  8020. +- case RX_MSDU_DECAP_ETHERNET2_DIX:
  8021. +- /* strip ethernet header and insert decapped 802.11
  8022. +- * header, amsdu subframe header and rfc1042 header */
  8023. +-
  8024. +- len = 0;
  8025. +- len += sizeof(struct rfc1042_hdr);
  8026. +- len += sizeof(struct amsdu_subframe_hdr);
  8027. +-
  8028. +- skb_pull(skb, sizeof(struct ethhdr));
  8029. +- memcpy(skb_push(skb, len), decap_hdr, len);
  8030. +- memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
  8031. +- break;
  8032. +- case RX_MSDU_DECAP_8023_SNAP_LLC:
  8033. +- /* insert decapped 802.11 header making a singly
  8034. +- * A-MSDU */
  8035. +- memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
  8036. +- break;
  8037. +- }
  8038. ++ rxd = (void *)msdu->data - sizeof(*rxd);
  8039. ++ hdr = (void *)rxd->rx_hdr_status;
  8040. +
  8041. +- skb_in = skb;
  8042. +- ath10k_htt_rx_h_protected(htt, rx_status, skb_in, enctype);
  8043. +- skb = skb->next;
  8044. +- skb_in->next = NULL;
  8045. ++ is_first = !!(rxd->msdu_end.info0 &
  8046. ++ __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
  8047. ++ is_last = !!(rxd->msdu_end.info0 &
  8048. ++ __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
  8049. ++ is_amsdu = !(is_first && is_last);
  8050. +
  8051. +- if (skb)
  8052. +- rx_status->flag |= RX_FLAG_AMSDU_MORE;
  8053. +- else
  8054. +- rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
  8055. ++ rfc1042 = hdr;
  8056. ++
  8057. ++ if (is_first) {
  8058. ++ hdr_len = ieee80211_hdrlen(hdr->frame_control);
  8059. ++ crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
  8060. +
  8061. +- ath10k_process_rx(htt->ar, rx_status, skb_in);
  8062. ++ rfc1042 += round_up(hdr_len, 4) +
  8063. ++ round_up(crypto_len, 4);
  8064. + }
  8065. +
  8066. +- /* FIXME: It might be nice to re-assemble the A-MSDU when there's a
  8067. +- * monitor interface active for sniffing purposes. */
  8068. ++ if (is_amsdu)
  8069. ++ rfc1042 += sizeof(struct amsdu_subframe_hdr);
  8070. ++
  8071. ++ return rfc1042;
  8072. + }
  8073. +
  8074. +-static void ath10k_htt_rx_msdu(struct ath10k_htt *htt,
  8075. +- struct ieee80211_rx_status *rx_status,
  8076. +- struct sk_buff *skb)
  8077. ++static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
  8078. ++ struct sk_buff *msdu,
  8079. ++ struct ieee80211_rx_status *status,
  8080. ++ const u8 first_hdr[64],
  8081. ++ enum htt_rx_mpdu_encrypt_type enctype)
  8082. + {
  8083. +- struct htt_rx_desc *rxd;
  8084. + struct ieee80211_hdr *hdr;
  8085. +- enum rx_msdu_decap_format fmt;
  8086. +- enum htt_rx_mpdu_encrypt_type enctype;
  8087. +- int hdr_len;
  8088. ++ struct ethhdr *eth;
  8089. ++ size_t hdr_len;
  8090. + void *rfc1042;
  8091. ++ u8 da[ETH_ALEN];
  8092. ++ u8 sa[ETH_ALEN];
  8093. +
  8094. +- /* This shouldn't happen. If it does than it may be a FW bug. */
  8095. +- if (skb->next) {
  8096. +- ath10k_warn("htt rx received chained non A-MSDU frame\n");
  8097. +- ath10k_htt_rx_free_msdu_chain(skb->next);
  8098. +- skb->next = NULL;
  8099. +- }
  8100. ++ /* Delivered decapped frame:
  8101. ++ * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
  8102. ++ * [payload]
  8103. ++ */
  8104. +
  8105. +- rxd = (void *)skb->data - sizeof(*rxd);
  8106. +- fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
  8107. +- RX_MSDU_START_INFO1_DECAP_FORMAT);
  8108. +- enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
  8109. +- RX_MPDU_START_INFO0_ENCRYPT_TYPE);
  8110. +- hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
  8111. ++ rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
  8112. ++ if (WARN_ON_ONCE(!rfc1042))
  8113. ++ return;
  8114. ++
  8115. ++ /* pull decapped header and copy SA & DA */
  8116. ++ eth = (struct ethhdr *)msdu->data;
  8117. ++ ether_addr_copy(da, eth->h_dest);
  8118. ++ ether_addr_copy(sa, eth->h_source);
  8119. ++ skb_pull(msdu, sizeof(struct ethhdr));
  8120. ++
  8121. ++ /* push rfc1042/llc/snap */
  8122. ++ memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
  8123. ++ sizeof(struct rfc1042_hdr));
  8124. ++
  8125. ++ /* push original 802.11 header */
  8126. ++ hdr = (struct ieee80211_hdr *)first_hdr;
  8127. + hdr_len = ieee80211_hdrlen(hdr->frame_control);
  8128. ++ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
  8129. ++
  8130. ++ /* original 802.11 header has a different DA and in
  8131. ++ * case of 4addr it may also have different SA
  8132. ++ */
  8133. ++ hdr = (struct ieee80211_hdr *)msdu->data;
  8134. ++ ether_addr_copy(ieee80211_get_DA(hdr), da);
  8135. ++ ether_addr_copy(ieee80211_get_SA(hdr), sa);
  8136. ++}
  8137. ++
  8138. ++static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
  8139. ++ struct sk_buff *msdu,
  8140. ++ struct ieee80211_rx_status *status,
  8141. ++ const u8 first_hdr[64])
  8142. ++{
  8143. ++ struct ieee80211_hdr *hdr;
  8144. ++ size_t hdr_len;
  8145. ++
  8146. ++ /* Delivered decapped frame:
  8147. ++ * [amsdu header] <-- replaced with 802.11 hdr
  8148. ++ * [rfc1042/llc]
  8149. ++ * [payload]
  8150. ++ */
  8151. ++
  8152. ++ skb_pull(msdu, sizeof(struct amsdu_subframe_hdr));
  8153. ++
  8154. ++ hdr = (struct ieee80211_hdr *)first_hdr;
  8155. ++ hdr_len = ieee80211_hdrlen(hdr->frame_control);
  8156. ++ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
  8157. ++}
  8158. ++
  8159. ++static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
  8160. ++ struct sk_buff *msdu,
  8161. ++ struct ieee80211_rx_status *status,
  8162. ++ u8 first_hdr[64],
  8163. ++ enum htt_rx_mpdu_encrypt_type enctype,
  8164. ++ bool is_decrypted)
  8165. ++{
  8166. ++ struct htt_rx_desc *rxd;
  8167. ++ enum rx_msdu_decap_format decap;
  8168. ++ struct ieee80211_hdr *hdr;
  8169. ++
  8170. ++ /* First msdu's decapped header:
  8171. ++ * [802.11 header] <-- padded to 4 bytes long
  8172. ++ * [crypto param] <-- padded to 4 bytes long
  8173. ++ * [amsdu header] <-- only if A-MSDU
  8174. ++ * [rfc1042/llc]
  8175. ++ *
  8176. ++ * Other (2nd, 3rd, ..) msdu's decapped header:
  8177. ++ * [amsdu header] <-- only if A-MSDU
  8178. ++ * [rfc1042/llc]
  8179. ++ */
  8180. +
  8181. +- skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
  8182. ++ rxd = (void *)msdu->data - sizeof(*rxd);
  8183. ++ hdr = (void *)rxd->rx_hdr_status;
  8184. ++ decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
  8185. ++ RX_MSDU_START_INFO1_DECAP_FORMAT);
  8186. +
  8187. +- switch (fmt) {
  8188. ++ switch (decap) {
  8189. + case RX_MSDU_DECAP_RAW:
  8190. +- /* remove trailing FCS */
  8191. +- skb_trim(skb, skb->len - FCS_LEN);
  8192. ++ ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
  8193. ++ is_decrypted);
  8194. + break;
  8195. + case RX_MSDU_DECAP_NATIVE_WIFI:
  8196. +- /* Pull decapped header */
  8197. +- hdr = (struct ieee80211_hdr *)skb->data;
  8198. +- hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
  8199. +- skb_pull(skb, hdr_len);
  8200. +-
  8201. +- /* Push original header */
  8202. +- hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
  8203. +- hdr_len = ieee80211_hdrlen(hdr->frame_control);
  8204. +- memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
  8205. ++ ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr);
  8206. + break;
  8207. + case RX_MSDU_DECAP_ETHERNET2_DIX:
  8208. +- /* strip ethernet header and insert decapped 802.11 header and
  8209. +- * rfc1042 header */
  8210. +-
  8211. +- rfc1042 = hdr;
  8212. +- rfc1042 += roundup(hdr_len, 4);
  8213. +- rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);
  8214. +-
  8215. +- skb_pull(skb, sizeof(struct ethhdr));
  8216. +- memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)),
  8217. +- rfc1042, sizeof(struct rfc1042_hdr));
  8218. +- memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
  8219. ++ ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
  8220. + break;
  8221. + case RX_MSDU_DECAP_8023_SNAP_LLC:
  8222. +- /* remove A-MSDU subframe header and insert
  8223. +- * decapped 802.11 header. rfc1042 header is already there */
  8224. +-
  8225. +- skb_pull(skb, sizeof(struct amsdu_subframe_hdr));
  8226. +- memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
  8227. ++ ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr);
  8228. + break;
  8229. + }
  8230. +-
  8231. +- ath10k_htt_rx_h_protected(htt, rx_status, skb, enctype);
  8232. +-
  8233. +- ath10k_process_rx(htt->ar, rx_status, skb);
  8234. + }
  8235. +
  8236. + static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
  8237. +@@ -1036,10 +1225,128 @@ static int ath10k_htt_rx_get_csum_state(
  8238. + return CHECKSUM_UNNECESSARY;
  8239. + }
  8240. +
  8241. +-static int ath10k_unchain_msdu(struct sk_buff *msdu_head)
  8242. ++static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
  8243. ++{
  8244. ++ msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
  8245. ++}
  8246. ++
  8247. ++static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
  8248. ++ struct sk_buff_head *amsdu,
  8249. ++ struct ieee80211_rx_status *status)
  8250. ++{
  8251. ++ struct sk_buff *first;
  8252. ++ struct sk_buff *last;
  8253. ++ struct sk_buff *msdu;
  8254. ++ struct htt_rx_desc *rxd;
  8255. ++ struct ieee80211_hdr *hdr;
  8256. ++ enum htt_rx_mpdu_encrypt_type enctype;
  8257. ++ u8 first_hdr[64];
  8258. ++ u8 *qos;
  8259. ++ size_t hdr_len;
  8260. ++ bool has_fcs_err;
  8261. ++ bool has_crypto_err;
  8262. ++ bool has_tkip_err;
  8263. ++ bool has_peer_idx_invalid;
  8264. ++ bool is_decrypted;
  8265. ++ u32 attention;
  8266. ++
  8267. ++ if (skb_queue_empty(amsdu))
  8268. ++ return;
  8269. ++
  8270. ++ first = skb_peek(amsdu);
  8271. ++ rxd = (void *)first->data - sizeof(*rxd);
  8272. ++
  8273. ++ enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
  8274. ++ RX_MPDU_START_INFO0_ENCRYPT_TYPE);
  8275. ++
  8276. ++ /* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
  8277. ++ * decapped header. It'll be used for undecapping of each MSDU.
  8278. ++ */
  8279. ++ hdr = (void *)rxd->rx_hdr_status;
  8280. ++ hdr_len = ieee80211_hdrlen(hdr->frame_control);
  8281. ++ memcpy(first_hdr, hdr, hdr_len);
  8282. ++
  8283. ++ /* Each A-MSDU subframe will use the original header as the base and be
  8284. ++ * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
  8285. ++ */
  8286. ++ hdr = (void *)first_hdr;
  8287. ++ qos = ieee80211_get_qos_ctl(hdr);
  8288. ++ qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
  8289. ++
  8290. ++ /* Some attention flags are valid only in the last MSDU. */
  8291. ++ last = skb_peek_tail(amsdu);
  8292. ++ rxd = (void *)last->data - sizeof(*rxd);
  8293. ++ attention = __le32_to_cpu(rxd->attention.flags);
  8294. ++
  8295. ++ has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
  8296. ++ has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
  8297. ++ has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
  8298. ++ has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);
  8299. ++
  8300. ++ /* Note: If hardware captures an encrypted frame that it can't decrypt,
  8301. ++ * e.g. due to fcs error, missing peer or invalid key data it will
  8302. ++ * report the frame as raw.
  8303. ++ */
  8304. ++ is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
  8305. ++ !has_fcs_err &&
  8306. ++ !has_crypto_err &&
  8307. ++ !has_peer_idx_invalid);
  8308. ++
  8309. ++ /* Clear per-MPDU flags while leaving per-PPDU flags intact. */
  8310. ++ status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
  8311. ++ RX_FLAG_MMIC_ERROR |
  8312. ++ RX_FLAG_DECRYPTED |
  8313. ++ RX_FLAG_IV_STRIPPED |
  8314. ++ RX_FLAG_MMIC_STRIPPED);
  8315. ++
  8316. ++ if (has_fcs_err)
  8317. ++ status->flag |= RX_FLAG_FAILED_FCS_CRC;
  8318. ++
  8319. ++ if (has_tkip_err)
  8320. ++ status->flag |= RX_FLAG_MMIC_ERROR;
  8321. ++
  8322. ++ if (is_decrypted)
  8323. ++ status->flag |= RX_FLAG_DECRYPTED |
  8324. ++ RX_FLAG_IV_STRIPPED |
  8325. ++ RX_FLAG_MMIC_STRIPPED;
  8326. ++
  8327. ++ skb_queue_walk(amsdu, msdu) {
  8328. ++ ath10k_htt_rx_h_csum_offload(msdu);
  8329. ++ ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
  8330. ++ is_decrypted);
  8331. ++
  8332. ++ /* Undecapping involves copying the original 802.11 header back
  8333. ++ * to sk_buff. If frame is protected and hardware has decrypted
  8334. ++ * it then remove the protected bit.
  8335. ++ */
  8336. ++ if (!is_decrypted)
  8337. ++ continue;
  8338. ++
  8339. ++ hdr = (void *)msdu->data;
  8340. ++ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
  8341. ++ }
  8342. ++}
  8343. ++
  8344. ++static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
  8345. ++ struct sk_buff_head *amsdu,
  8346. ++ struct ieee80211_rx_status *status)
  8347. ++{
  8348. ++ struct sk_buff *msdu;
  8349. ++
  8350. ++ while ((msdu = __skb_dequeue(amsdu))) {
  8351. ++ /* Setup per-MSDU flags */
  8352. ++ if (skb_queue_empty(amsdu))
  8353. ++ status->flag &= ~RX_FLAG_AMSDU_MORE;
  8354. ++ else
  8355. ++ status->flag |= RX_FLAG_AMSDU_MORE;
  8356. ++
  8357. ++ ath10k_process_rx(ar, status, msdu);
  8358. ++ }
  8359. ++}
  8360. ++
  8361. ++static int ath10k_unchain_msdu(struct sk_buff_head *amsdu)
  8362. + {
  8363. +- struct sk_buff *next = msdu_head->next;
  8364. +- struct sk_buff *to_free = next;
  8365. ++ struct sk_buff *skb, *first;
  8366. + int space;
  8367. + int total_len = 0;
  8368. +
  8369. +@@ -1050,110 +1357,142 @@ static int ath10k_unchain_msdu(struct sk
  8370. + * skb?
  8371. + */
  8372. +
  8373. +- msdu_head->next = NULL;
  8374. ++ first = __skb_dequeue(amsdu);
  8375. +
  8376. + /* Allocate total length all at once. */
  8377. +- while (next) {
  8378. +- total_len += next->len;
  8379. +- next = next->next;
  8380. +- }
  8381. ++ skb_queue_walk(amsdu, skb)
  8382. ++ total_len += skb->len;
  8383. +
  8384. +- space = total_len - skb_tailroom(msdu_head);
  8385. ++ space = total_len - skb_tailroom(first);
  8386. + if ((space > 0) &&
  8387. +- (pskb_expand_head(msdu_head, 0, space, GFP_ATOMIC) < 0)) {
  8388. ++ (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
  8389. + /* TODO: bump some rx-oom error stat */
  8390. + /* put it back together so we can free the
  8391. + * whole list at once.
  8392. + */
  8393. +- msdu_head->next = to_free;
  8394. ++ __skb_queue_head(amsdu, first);
  8395. + return -1;
  8396. + }
  8397. +
  8398. + /* Walk list again, copying contents into
  8399. + * msdu_head
  8400. + */
  8401. +- next = to_free;
  8402. +- while (next) {
  8403. +- skb_copy_from_linear_data(next, skb_put(msdu_head, next->len),
  8404. +- next->len);
  8405. +- next = next->next;
  8406. ++ while ((skb = __skb_dequeue(amsdu))) {
  8407. ++ skb_copy_from_linear_data(skb, skb_put(first, skb->len),
  8408. ++ skb->len);
  8409. ++ dev_kfree_skb_any(skb);
  8410. + }
  8411. +
  8412. +- /* If here, we have consolidated skb. Free the
  8413. +- * fragments and pass the main skb on up the
  8414. +- * stack.
  8415. +- */
  8416. +- ath10k_htt_rx_free_msdu_chain(to_free);
  8417. ++ __skb_queue_head(amsdu, first);
  8418. + return 0;
  8419. + }
  8420. +
  8421. +-static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt,
  8422. +- struct sk_buff *head,
  8423. +- enum htt_rx_mpdu_status status,
  8424. +- bool channel_set,
  8425. +- u32 attention)
  8426. +-{
  8427. +- if (head->len == 0) {
  8428. +- ath10k_dbg(ATH10K_DBG_HTT,
  8429. +- "htt rx dropping due to zero-len\n");
  8430. +- return false;
  8431. +- }
  8432. ++static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
  8433. ++ struct sk_buff_head *amsdu,
  8434. ++ bool chained)
  8435. ++{
  8436. ++ struct sk_buff *first;
  8437. ++ struct htt_rx_desc *rxd;
  8438. ++ enum rx_msdu_decap_format decap;
  8439. +
  8440. +- if (attention & RX_ATTENTION_FLAGS_DECRYPT_ERR) {
  8441. +- ath10k_dbg(ATH10K_DBG_HTT,
  8442. +- "htt rx dropping due to decrypt-err\n");
  8443. +- return false;
  8444. +- }
  8445. ++ first = skb_peek(amsdu);
  8446. ++ rxd = (void *)first->data - sizeof(*rxd);
  8447. ++ decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
  8448. ++ RX_MSDU_START_INFO1_DECAP_FORMAT);
  8449. +
  8450. +- if (!channel_set) {
  8451. +- ath10k_warn("no channel configured; ignoring frame!\n");
  8452. +- return false;
  8453. ++ if (!chained)
  8454. ++ return;
  8455. ++
  8456. ++ /* FIXME: Current unchaining logic can only handle simple case of raw
  8457. ++ * msdu chaining. If decapping is other than raw the chaining may be
  8458. ++ * more complex and this isn't handled by the current code. Don't even
  8459. ++ * try re-constructing such frames - it'll be pretty much garbage.
  8460. ++ */
  8461. ++ if (decap != RX_MSDU_DECAP_RAW ||
  8462. ++ skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
  8463. ++ __skb_queue_purge(amsdu);
  8464. ++ return;
  8465. + }
  8466. +
  8467. +- /* Skip mgmt frames while we handle this in WMI */
  8468. +- if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
  8469. +- attention & RX_ATTENTION_FLAGS_MGMT_TYPE) {
  8470. +- ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
  8471. ++ ath10k_unchain_msdu(amsdu);
  8472. ++}
  8473. ++
  8474. ++static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
  8475. ++ struct sk_buff_head *amsdu,
  8476. ++ struct ieee80211_rx_status *rx_status)
  8477. ++{
  8478. ++ struct sk_buff *msdu;
  8479. ++ struct htt_rx_desc *rxd;
  8480. ++ bool is_mgmt;
  8481. ++ bool has_fcs_err;
  8482. ++
  8483. ++ msdu = skb_peek(amsdu);
  8484. ++ rxd = (void *)msdu->data - sizeof(*rxd);
  8485. ++
  8486. ++ /* FIXME: It might be a good idea to do some fuzzy-testing to drop
  8487. ++ * invalid/dangerous frames.
  8488. ++ */
  8489. ++
  8490. ++ if (!rx_status->freq) {
  8491. ++ ath10k_warn(ar, "no channel configured; ignoring frame(s)!\n");
  8492. + return false;
  8493. + }
  8494. +
  8495. +- if (status != HTT_RX_IND_MPDU_STATUS_OK &&
  8496. +- status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
  8497. +- status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
  8498. +- !htt->ar->monitor_started) {
  8499. +- ath10k_dbg(ATH10K_DBG_HTT,
  8500. +- "htt rx ignoring frame w/ status %d\n",
  8501. +- status);
  8502. ++ is_mgmt = !!(rxd->attention.flags &
  8503. ++ __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
  8504. ++ has_fcs_err = !!(rxd->attention.flags &
  8505. ++ __cpu_to_le32(RX_ATTENTION_FLAGS_FCS_ERR));
  8506. ++
  8507. ++ /* Management frames are handled via WMI events. The pros of such
  8508. ++ * approach is that channel is explicitly provided in WMI events
  8509. ++ * whereas HTT doesn't provide channel information for Rxed frames.
  8510. ++ *
  8511. ++ * However some firmware revisions don't report corrupted frames via
  8512. ++ * WMI so don't drop them.
  8513. ++ */
  8514. ++ if (is_mgmt && !has_fcs_err) {
  8515. ++ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
  8516. + return false;
  8517. + }
  8518. +
  8519. +- if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
  8520. +- ath10k_dbg(ATH10K_DBG_HTT,
  8521. +- "htt rx CAC running\n");
  8522. ++ if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
  8523. ++ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
  8524. + return false;
  8525. + }
  8526. +
  8527. + return true;
  8528. + }
  8529. +
  8530. ++static void ath10k_htt_rx_h_filter(struct ath10k *ar,
  8531. ++ struct sk_buff_head *amsdu,
  8532. ++ struct ieee80211_rx_status *rx_status)
  8533. ++{
  8534. ++ if (skb_queue_empty(amsdu))
  8535. ++ return;
  8536. ++
  8537. ++ if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
  8538. ++ return;
  8539. ++
  8540. ++ __skb_queue_purge(amsdu);
  8541. ++}
  8542. ++
  8543. + static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
  8544. + struct htt_rx_indication *rx)
  8545. + {
  8546. ++ struct ath10k *ar = htt->ar;
  8547. + struct ieee80211_rx_status *rx_status = &htt->rx_status;
  8548. + struct htt_rx_indication_mpdu_range *mpdu_ranges;
  8549. +- struct htt_rx_desc *rxd;
  8550. +- enum htt_rx_mpdu_status status;
  8551. +- struct ieee80211_hdr *hdr;
  8552. ++ struct sk_buff_head amsdu;
  8553. + int num_mpdu_ranges;
  8554. +- u32 attention;
  8555. + int fw_desc_len;
  8556. + u8 *fw_desc;
  8557. +- bool channel_set;
  8558. +- int i, j;
  8559. +- int ret;
  8560. ++ int i, ret, mpdu_count = 0;
  8561. +
  8562. + lockdep_assert_held(&htt->rx_ring.lock);
  8563. +
  8564. ++ if (htt->rx_confused)
  8565. ++ return;
  8566. ++
  8567. + fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
  8568. + fw_desc = (u8 *)&rx->fw_desc;
  8569. +
  8570. +@@ -1161,201 +1500,82 @@ static void ath10k_htt_rx_handler(struct
  8571. + HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
  8572. + mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
  8573. +
  8574. +- /* Fill this once, while this is per-ppdu */
  8575. +- if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_START_VALID) {
  8576. +- memset(rx_status, 0, sizeof(*rx_status));
  8577. +- rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
  8578. +- rx->ppdu.combined_rssi;
  8579. +- }
  8580. +-
  8581. +- if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
  8582. +- /* TSF available only in 32-bit */
  8583. +- rx_status->mactime = __le32_to_cpu(rx->ppdu.tsf) & 0xffffffff;
  8584. +- rx_status->flag |= RX_FLAG_MACTIME_END;
  8585. +- }
  8586. +-
  8587. +- channel_set = ath10k_htt_rx_h_channel(htt->ar, rx_status);
  8588. +-
  8589. +- if (channel_set) {
  8590. +- ath10k_htt_rx_h_rates(htt->ar, rx_status->band,
  8591. +- rx->ppdu.info0,
  8592. +- __le32_to_cpu(rx->ppdu.info1),
  8593. +- __le32_to_cpu(rx->ppdu.info2),
  8594. +- rx_status);
  8595. +- }
  8596. +-
  8597. +- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
  8598. ++ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
  8599. + rx, sizeof(*rx) +
  8600. + (sizeof(struct htt_rx_indication_mpdu_range) *
  8601. + num_mpdu_ranges));
  8602. +
  8603. +- for (i = 0; i < num_mpdu_ranges; i++) {
  8604. +- status = mpdu_ranges[i].mpdu_range_status;
  8605. +-
  8606. +- for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
  8607. +- struct sk_buff *msdu_head, *msdu_tail;
  8608. ++ for (i = 0; i < num_mpdu_ranges; i++)
  8609. ++ mpdu_count += mpdu_ranges[i].mpdu_count;
  8610. +
  8611. +- msdu_head = NULL;
  8612. +- msdu_tail = NULL;
  8613. +- ret = ath10k_htt_rx_amsdu_pop(htt,
  8614. +- &fw_desc,
  8615. +- &fw_desc_len,
  8616. +- &msdu_head,
  8617. +- &msdu_tail);
  8618. +-
  8619. +- if (ret < 0) {
  8620. +- ath10k_warn("failed to pop amsdu from htt rx ring %d\n",
  8621. +- ret);
  8622. +- ath10k_htt_rx_free_msdu_chain(msdu_head);
  8623. +- continue;
  8624. +- }
  8625. +-
  8626. +- rxd = container_of((void *)msdu_head->data,
  8627. +- struct htt_rx_desc,
  8628. +- msdu_payload);
  8629. +- attention = __le32_to_cpu(rxd->attention.flags);
  8630. +-
  8631. +- if (!ath10k_htt_rx_amsdu_allowed(htt, msdu_head,
  8632. +- status,
  8633. +- channel_set,
  8634. +- attention)) {
  8635. +- ath10k_htt_rx_free_msdu_chain(msdu_head);
  8636. +- continue;
  8637. +- }
  8638. +-
  8639. +- if (ret > 0 &&
  8640. +- ath10k_unchain_msdu(msdu_head) < 0) {
  8641. +- ath10k_htt_rx_free_msdu_chain(msdu_head);
  8642. +- continue;
  8643. +- }
  8644. +-
  8645. +- if (attention & RX_ATTENTION_FLAGS_FCS_ERR)
  8646. +- rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
  8647. +- else
  8648. +- rx_status->flag &= ~RX_FLAG_FAILED_FCS_CRC;
  8649. +-
  8650. +- if (attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
  8651. +- rx_status->flag |= RX_FLAG_MMIC_ERROR;
  8652. +- else
  8653. +- rx_status->flag &= ~RX_FLAG_MMIC_ERROR;
  8654. +-
  8655. +- hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
  8656. +-
  8657. +- if (ath10k_htt_rx_hdr_is_amsdu(hdr))
  8658. +- ath10k_htt_rx_amsdu(htt, rx_status, msdu_head);
  8659. +- else
  8660. +- ath10k_htt_rx_msdu(htt, rx_status, msdu_head);
  8661. ++ while (mpdu_count--) {
  8662. ++ __skb_queue_head_init(&amsdu);
  8663. ++ ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc,
  8664. ++ &fw_desc_len, &amsdu);
  8665. ++ if (ret < 0) {
  8666. ++ ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
  8667. ++ __skb_queue_purge(&amsdu);
  8668. ++ /* FIXME: It's probably a good idea to reboot the
  8669. ++ * device instead of leaving it inoperable.
  8670. ++ */
  8671. ++ htt->rx_confused = true;
  8672. ++ break;
  8673. + }
  8674. ++
  8675. ++ ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status);
  8676. ++ ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
  8677. ++ ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
  8678. ++ ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
  8679. ++ ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
  8680. + }
  8681. +
  8682. + tasklet_schedule(&htt->rx_replenish_task);
  8683. + }
  8684. +
  8685. + static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
  8686. +- struct htt_rx_fragment_indication *frag)
  8687. ++ struct htt_rx_fragment_indication *frag)
  8688. + {
  8689. +- struct sk_buff *msdu_head, *msdu_tail;
  8690. +- enum htt_rx_mpdu_encrypt_type enctype;
  8691. +- struct htt_rx_desc *rxd;
  8692. +- enum rx_msdu_decap_format fmt;
  8693. ++ struct ath10k *ar = htt->ar;
  8694. + struct ieee80211_rx_status *rx_status = &htt->rx_status;
  8695. +- struct ieee80211_hdr *hdr;
  8696. ++ struct sk_buff_head amsdu;
  8697. + int ret;
  8698. +- bool tkip_mic_err;
  8699. +- bool decrypt_err;
  8700. + u8 *fw_desc;
  8701. +- int fw_desc_len, hdrlen, paramlen;
  8702. +- int trim;
  8703. ++ int fw_desc_len;
  8704. +
  8705. + fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
  8706. + fw_desc = (u8 *)frag->fw_msdu_rx_desc;
  8707. +
  8708. +- msdu_head = NULL;
  8709. +- msdu_tail = NULL;
  8710. ++ __skb_queue_head_init(&amsdu);
  8711. +
  8712. + spin_lock_bh(&htt->rx_ring.lock);
  8713. + ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
  8714. +- &msdu_head, &msdu_tail);
  8715. ++ &amsdu);
  8716. + spin_unlock_bh(&htt->rx_ring.lock);
  8717. +
  8718. +- ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
  8719. ++ tasklet_schedule(&htt->rx_replenish_task);
  8720. ++
  8721. ++ ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
  8722. +
  8723. + if (ret) {
  8724. +- ath10k_warn("failed to pop amsdu from httr rx ring for fragmented rx %d\n",
  8725. ++ ath10k_warn(ar, "failed to pop amsdu from httr rx ring for fragmented rx %d\n",
  8726. + ret);
  8727. +- ath10k_htt_rx_free_msdu_chain(msdu_head);
  8728. ++ __skb_queue_purge(&amsdu);
  8729. + return;
  8730. + }
  8731. +
  8732. +- /* FIXME: implement signal strength */
  8733. +-
  8734. +- hdr = (struct ieee80211_hdr *)msdu_head->data;
  8735. +- rxd = (void *)msdu_head->data - sizeof(*rxd);
  8736. +- tkip_mic_err = !!(__le32_to_cpu(rxd->attention.flags) &
  8737. +- RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
  8738. +- decrypt_err = !!(__le32_to_cpu(rxd->attention.flags) &
  8739. +- RX_ATTENTION_FLAGS_DECRYPT_ERR);
  8740. +- fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
  8741. +- RX_MSDU_START_INFO1_DECAP_FORMAT);
  8742. +-
  8743. +- if (fmt != RX_MSDU_DECAP_RAW) {
  8744. +- ath10k_warn("we dont support non-raw fragmented rx yet\n");
  8745. +- dev_kfree_skb_any(msdu_head);
  8746. +- goto end;
  8747. +- }
  8748. +-
  8749. +- enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
  8750. +- RX_MPDU_START_INFO0_ENCRYPT_TYPE);
  8751. +- ath10k_htt_rx_h_protected(htt, rx_status, msdu_head, enctype);
  8752. +- msdu_head->ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
  8753. +-
  8754. +- if (tkip_mic_err)
  8755. +- ath10k_warn("tkip mic error\n");
  8756. +-
  8757. +- if (decrypt_err) {
  8758. +- ath10k_warn("decryption err in fragmented rx\n");
  8759. +- dev_kfree_skb_any(msdu_head);
  8760. +- goto end;
  8761. +- }
  8762. +-
  8763. +- if (enctype != HTT_RX_MPDU_ENCRYPT_NONE) {
  8764. +- hdrlen = ieee80211_hdrlen(hdr->frame_control);
  8765. +- paramlen = ath10k_htt_rx_crypto_param_len(enctype);
  8766. +-
  8767. +- /* It is more efficient to move the header than the payload */
  8768. +- memmove((void *)msdu_head->data + paramlen,
  8769. +- (void *)msdu_head->data,
  8770. +- hdrlen);
  8771. +- skb_pull(msdu_head, paramlen);
  8772. +- hdr = (struct ieee80211_hdr *)msdu_head->data;
  8773. +- }
  8774. +-
  8775. +- /* remove trailing FCS */
  8776. +- trim = 4;
  8777. +-
  8778. +- /* remove crypto trailer */
  8779. +- trim += ath10k_htt_rx_crypto_tail_len(enctype);
  8780. +-
  8781. +- /* last fragment of TKIP frags has MIC */
  8782. +- if (!ieee80211_has_morefrags(hdr->frame_control) &&
  8783. +- enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
  8784. +- trim += 8;
  8785. +-
  8786. +- if (trim > msdu_head->len) {
  8787. +- ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
  8788. +- dev_kfree_skb_any(msdu_head);
  8789. +- goto end;
  8790. ++ if (skb_queue_len(&amsdu) != 1) {
  8791. ++ ath10k_warn(ar, "failed to pop frag amsdu: too many msdus\n");
  8792. ++ __skb_queue_purge(&amsdu);
  8793. ++ return;
  8794. + }
  8795. +
  8796. +- skb_trim(msdu_head, msdu_head->len - trim);
  8797. +-
  8798. +- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ",
  8799. +- msdu_head->data, msdu_head->len);
  8800. +- ath10k_process_rx(htt->ar, rx_status, msdu_head);
  8801. ++ ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status);
  8802. ++ ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
  8803. ++ ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
  8804. ++ ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
  8805. +
  8806. +-end:
  8807. + if (fw_desc_len > 0) {
  8808. +- ath10k_dbg(ATH10K_DBG_HTT,
  8809. ++ ath10k_dbg(ar, ATH10K_DBG_HTT,
  8810. + "expecting more fragmented rx in one indication %d\n",
  8811. + fw_desc_len);
  8812. + }
  8813. +@@ -1385,12 +1605,12 @@ static void ath10k_htt_rx_frm_tx_compl(s
  8814. + tx_done.discard = true;
  8815. + break;
  8816. + default:
  8817. +- ath10k_warn("unhandled tx completion status %d\n", status);
  8818. ++ ath10k_warn(ar, "unhandled tx completion status %d\n", status);
  8819. + tx_done.discard = true;
  8820. + break;
  8821. + }
  8822. +
  8823. +- ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
  8824. ++ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
  8825. + resp->data_tx_completion.num_msdus);
  8826. +
  8827. + for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
  8828. +@@ -1400,6 +1620,274 @@ static void ath10k_htt_rx_frm_tx_compl(s
  8829. + }
  8830. + }
  8831. +
  8832. ++static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
  8833. ++{
  8834. ++ struct htt_rx_addba *ev = &resp->rx_addba;
  8835. ++ struct ath10k_peer *peer;
  8836. ++ struct ath10k_vif *arvif;
  8837. ++ u16 info0, tid, peer_id;
  8838. ++
  8839. ++ info0 = __le16_to_cpu(ev->info0);
  8840. ++ tid = MS(info0, HTT_RX_BA_INFO0_TID);
  8841. ++ peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
  8842. ++
  8843. ++ ath10k_dbg(ar, ATH10K_DBG_HTT,
  8844. ++ "htt rx addba tid %hu peer_id %hu size %hhu\n",
  8845. ++ tid, peer_id, ev->window_size);
  8846. ++
  8847. ++ spin_lock_bh(&ar->data_lock);
  8848. ++ peer = ath10k_peer_find_by_id(ar, peer_id);
  8849. ++ if (!peer) {
  8850. ++ ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
  8851. ++ peer_id);
  8852. ++ spin_unlock_bh(&ar->data_lock);
  8853. ++ return;
  8854. ++ }
  8855. ++
  8856. ++ arvif = ath10k_get_arvif(ar, peer->vdev_id);
  8857. ++ if (!arvif) {
  8858. ++ ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
  8859. ++ peer->vdev_id);
  8860. ++ spin_unlock_bh(&ar->data_lock);
  8861. ++ return;
  8862. ++ }
  8863. ++
  8864. ++ ath10k_dbg(ar, ATH10K_DBG_HTT,
  8865. ++ "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
  8866. ++ peer->addr, tid, ev->window_size);
  8867. ++
  8868. ++ ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
  8869. ++ spin_unlock_bh(&ar->data_lock);
  8870. ++}
  8871. ++
  8872. ++static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
  8873. ++{
  8874. ++ struct htt_rx_delba *ev = &resp->rx_delba;
  8875. ++ struct ath10k_peer *peer;
  8876. ++ struct ath10k_vif *arvif;
  8877. ++ u16 info0, tid, peer_id;
  8878. ++
  8879. ++ info0 = __le16_to_cpu(ev->info0);
  8880. ++ tid = MS(info0, HTT_RX_BA_INFO0_TID);
  8881. ++ peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
  8882. ++
  8883. ++ ath10k_dbg(ar, ATH10K_DBG_HTT,
  8884. ++ "htt rx delba tid %hu peer_id %hu\n",
  8885. ++ tid, peer_id);
  8886. ++
  8887. ++ spin_lock_bh(&ar->data_lock);
  8888. ++ peer = ath10k_peer_find_by_id(ar, peer_id);
  8889. ++ if (!peer) {
  8890. ++ ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
  8891. ++ peer_id);
  8892. ++ spin_unlock_bh(&ar->data_lock);
  8893. ++ return;
  8894. ++ }
  8895. ++
  8896. ++ arvif = ath10k_get_arvif(ar, peer->vdev_id);
  8897. ++ if (!arvif) {
  8898. ++ ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
  8899. ++ peer->vdev_id);
  8900. ++ spin_unlock_bh(&ar->data_lock);
  8901. ++ return;
  8902. ++ }
  8903. ++
  8904. ++ ath10k_dbg(ar, ATH10K_DBG_HTT,
  8905. ++ "htt rx stop rx ba session sta %pM tid %hu\n",
  8906. ++ peer->addr, tid);
  8907. ++
  8908. ++ ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
  8909. ++ spin_unlock_bh(&ar->data_lock);
  8910. ++}
  8911. ++
  8912. ++static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
  8913. ++ struct sk_buff_head *amsdu)
  8914. ++{
  8915. ++ struct sk_buff *msdu;
  8916. ++ struct htt_rx_desc *rxd;
  8917. ++
  8918. ++ if (skb_queue_empty(list))
  8919. ++ return -ENOBUFS;
  8920. ++
  8921. ++ if (WARN_ON(!skb_queue_empty(amsdu)))
  8922. ++ return -EINVAL;
  8923. ++
  8924. ++ while ((msdu = __skb_dequeue(list))) {
  8925. ++ __skb_queue_tail(amsdu, msdu);
  8926. ++
  8927. ++ rxd = (void *)msdu->data - sizeof(*rxd);
  8928. ++ if (rxd->msdu_end.info0 &
  8929. ++ __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
  8930. ++ break;
  8931. ++ }
  8932. ++
  8933. ++ msdu = skb_peek_tail(amsdu);
  8934. ++ rxd = (void *)msdu->data - sizeof(*rxd);
  8935. ++ if (!(rxd->msdu_end.info0 &
  8936. ++ __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
  8937. ++ skb_queue_splice_init(amsdu, list);
  8938. ++ return -EAGAIN;
  8939. ++ }
  8940. ++
  8941. ++ return 0;
  8942. ++}
  8943. ++
  8944. ++static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
  8945. ++ struct sk_buff *skb)
  8946. ++{
  8947. ++ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
  8948. ++
  8949. ++ if (!ieee80211_has_protected(hdr->frame_control))
  8950. ++ return;
  8951. ++
  8952. ++ /* Offloaded frames are already decrypted but firmware insists they are
  8953. ++ * protected in the 802.11 header. Strip the flag. Otherwise mac80211
  8954. ++ * will drop the frame.
  8955. ++ */
  8956. ++
  8957. ++ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
  8958. ++ status->flag |= RX_FLAG_DECRYPTED |
  8959. ++ RX_FLAG_IV_STRIPPED |
  8960. ++ RX_FLAG_MMIC_STRIPPED;
  8961. ++}
  8962. ++
  8963. ++static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
  8964. ++ struct sk_buff_head *list)
  8965. ++{
  8966. ++ struct ath10k_htt *htt = &ar->htt;
  8967. ++ struct ieee80211_rx_status *status = &htt->rx_status;
  8968. ++ struct htt_rx_offload_msdu *rx;
  8969. ++ struct sk_buff *msdu;
  8970. ++ size_t offset;
  8971. ++
  8972. ++ while ((msdu = __skb_dequeue(list))) {
  8973. ++ /* Offloaded frames don't have Rx descriptor. Instead they have
  8974. ++ * a short meta information header.
  8975. ++ */
  8976. ++
  8977. ++ rx = (void *)msdu->data;
  8978. ++
  8979. ++ skb_put(msdu, sizeof(*rx));
  8980. ++ skb_pull(msdu, sizeof(*rx));
  8981. ++
  8982. ++ if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
  8983. ++ ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
  8984. ++ dev_kfree_skb_any(msdu);
  8985. ++ continue;
  8986. ++ }
  8987. ++
  8988. ++ skb_put(msdu, __le16_to_cpu(rx->msdu_len));
  8989. ++
  8990. ++ /* Offloaded rx header length isn't multiple of 2 nor 4 so the
  8991. ++ * actual payload is unaligned. Align the frame. Otherwise
  8992. ++ * mac80211 complains. This shouldn't reduce performance much
  8993. ++ * because these offloaded frames are rare.
  8994. ++ */
  8995. ++ offset = 4 - ((unsigned long)msdu->data & 3);
  8996. ++ skb_put(msdu, offset);
  8997. ++ memmove(msdu->data + offset, msdu->data, msdu->len);
  8998. ++ skb_pull(msdu, offset);
  8999. ++
  9000. ++ /* FIXME: The frame is NWifi. Re-construct QoS Control
  9001. ++ * if possible later.
  9002. ++ */
  9003. ++
  9004. ++ memset(status, 0, sizeof(*status));
  9005. ++ status->flag |= RX_FLAG_NO_SIGNAL_VAL;
  9006. ++
  9007. ++ ath10k_htt_rx_h_rx_offload_prot(status, msdu);
  9008. ++ ath10k_htt_rx_h_channel(ar, status);
  9009. ++ ath10k_process_rx(ar, status, msdu);
  9010. ++ }
  9011. ++}
  9012. ++
  9013. ++static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
  9014. ++{
  9015. ++ struct ath10k_htt *htt = &ar->htt;
  9016. ++ struct htt_resp *resp = (void *)skb->data;
  9017. ++ struct ieee80211_rx_status *status = &htt->rx_status;
  9018. ++ struct sk_buff_head list;
  9019. ++ struct sk_buff_head amsdu;
  9020. ++ u16 peer_id;
  9021. ++ u16 msdu_count;
  9022. ++ u8 vdev_id;
  9023. ++ u8 tid;
  9024. ++ bool offload;
  9025. ++ bool frag;
  9026. ++ int ret;
  9027. ++
  9028. ++ lockdep_assert_held(&htt->rx_ring.lock);
  9029. ++
  9030. ++ if (htt->rx_confused)
  9031. ++ return;
  9032. ++
  9033. ++ skb_pull(skb, sizeof(resp->hdr));
  9034. ++ skb_pull(skb, sizeof(resp->rx_in_ord_ind));
  9035. ++
  9036. ++ peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
  9037. ++ msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
  9038. ++ vdev_id = resp->rx_in_ord_ind.vdev_id;
  9039. ++ tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
  9040. ++ offload = !!(resp->rx_in_ord_ind.info &
  9041. ++ HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
  9042. ++ frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
  9043. ++
  9044. ++ ath10k_dbg(ar, ATH10K_DBG_HTT,
  9045. ++ "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
  9046. ++ vdev_id, peer_id, tid, offload, frag, msdu_count);
  9047. ++
  9048. ++ if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
  9049. ++ ath10k_warn(ar, "dropping invalid in order rx indication\n");
  9050. ++ return;
  9051. ++ }
  9052. ++
  9053. ++ /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
  9054. ++ * extracted and processed.
  9055. ++ */
  9056. ++ __skb_queue_head_init(&list);
  9057. ++ ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list);
  9058. ++ if (ret < 0) {
  9059. ++ ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
  9060. ++ htt->rx_confused = true;
  9061. ++ return;
  9062. ++ }
  9063. ++
  9064. ++ /* Offloaded frames are very different and need to be handled
  9065. ++ * separately.
  9066. ++ */
  9067. ++ if (offload)
  9068. ++ ath10k_htt_rx_h_rx_offload(ar, &list);
  9069. ++
  9070. ++ while (!skb_queue_empty(&list)) {
  9071. ++ __skb_queue_head_init(&amsdu);
  9072. ++ ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
  9073. ++ switch (ret) {
  9074. ++ case 0:
  9075. ++ /* Note: The in-order indication may report interleaved
  9076. ++ * frames from different PPDUs meaning reported rx rate
  9077. ++ * to mac80211 isn't accurate/reliable. It's still
  9078. ++ * better to report something than nothing though. This
  9079. ++ * should still give an idea about rx rate to the user.
  9080. ++ */
  9081. ++ ath10k_htt_rx_h_ppdu(ar, &amsdu, status);
  9082. ++ ath10k_htt_rx_h_filter(ar, &amsdu, status);
  9083. ++ ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
  9084. ++ ath10k_htt_rx_h_deliver(ar, &amsdu, status);
  9085. ++ break;
  9086. ++ case -EAGAIN:
  9087. ++ /* fall through */
  9088. ++ default:
  9089. ++ /* Should not happen. */
  9090. ++ ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
  9091. ++ htt->rx_confused = true;
  9092. ++ __skb_queue_purge(&list);
  9093. ++ return;
  9094. ++ }
  9095. ++ }
  9096. ++
  9097. ++ tasklet_schedule(&htt->rx_replenish_task);
  9098. ++}
  9099. ++
  9100. + void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
  9101. + {
  9102. + struct ath10k_htt *htt = &ar->htt;
  9103. +@@ -1407,9 +1895,9 @@ void ath10k_htt_t2h_msg_handler(struct a
  9104. +
  9105. + /* confirm alignment */
  9106. + if (!IS_ALIGNED((unsigned long)skb->data, 4))
  9107. +- ath10k_warn("unaligned htt message, expect trouble\n");
  9108. ++ ath10k_warn(ar, "unaligned htt message, expect trouble\n");
  9109. +
  9110. +- ath10k_dbg(ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
  9111. ++ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
  9112. + resp->hdr.msg_type);
  9113. + switch (resp->hdr.msg_type) {
  9114. + case HTT_T2H_MSG_TYPE_VERSION_CONF: {
  9115. +@@ -1473,7 +1961,7 @@ void ath10k_htt_t2h_msg_handler(struct a
  9116. + struct ath10k *ar = htt->ar;
  9117. + struct htt_security_indication *ev = &resp->security_indication;
  9118. +
  9119. +- ath10k_dbg(ATH10K_DBG_HTT,
  9120. ++ ath10k_dbg(ar, ATH10K_DBG_HTT,
  9121. + "sec ind peer_id %d unicast %d type %d\n",
  9122. + __le16_to_cpu(ev->peer_id),
  9123. + !!(ev->flags & HTT_SECURITY_IS_UNICAST),
  9124. +@@ -1482,7 +1970,7 @@ void ath10k_htt_t2h_msg_handler(struct a
  9125. + break;
  9126. + }
  9127. + case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
  9128. +- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
  9129. ++ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
  9130. + skb->data, skb->len);
  9131. + ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
  9132. + break;
  9133. +@@ -1491,16 +1979,55 @@ void ath10k_htt_t2h_msg_handler(struct a
  9134. + /* FIX THIS */
  9135. + break;
  9136. + case HTT_T2H_MSG_TYPE_STATS_CONF:
  9137. +- trace_ath10k_htt_stats(skb->data, skb->len);
  9138. ++ trace_ath10k_htt_stats(ar, skb->data, skb->len);
  9139. + break;
  9140. + case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
  9141. ++ /* Firmware can return tx frames if it's unable to fully
  9142. ++ * process them and suspects host may be able to fix it. ath10k
  9143. ++ * sends all tx frames as already inspected so this shouldn't
  9144. ++ * happen unless fw has a bug.
  9145. ++ */
  9146. ++ ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
  9147. ++ break;
  9148. + case HTT_T2H_MSG_TYPE_RX_ADDBA:
  9149. ++ ath10k_htt_rx_addba(ar, resp);
  9150. ++ break;
  9151. + case HTT_T2H_MSG_TYPE_RX_DELBA:
  9152. +- case HTT_T2H_MSG_TYPE_RX_FLUSH:
  9153. ++ ath10k_htt_rx_delba(ar, resp);
  9154. ++ break;
  9155. ++ case HTT_T2H_MSG_TYPE_PKTLOG: {
  9156. ++ struct ath10k_pktlog_hdr *hdr =
  9157. ++ (struct ath10k_pktlog_hdr *)resp->pktlog_msg.payload;
  9158. ++
  9159. ++ trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
  9160. ++ sizeof(*hdr) +
  9161. ++ __le16_to_cpu(hdr->size));
  9162. ++ break;
  9163. ++ }
  9164. ++ case HTT_T2H_MSG_TYPE_RX_FLUSH: {
  9165. ++ /* Ignore this event because mac80211 takes care of Rx
  9166. ++ * aggregation reordering.
  9167. ++ */
  9168. ++ break;
  9169. ++ }
  9170. ++ case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
  9171. ++ spin_lock_bh(&htt->rx_ring.lock);
  9172. ++ __skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
  9173. ++ spin_unlock_bh(&htt->rx_ring.lock);
  9174. ++ tasklet_schedule(&htt->txrx_compl_task);
  9175. ++ return;
  9176. ++ }
  9177. ++ case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
  9178. ++ /* FIXME: This WMI-TLV event is overlapping with 10.2
  9179. ++ * CHAN_CHANGE - both being 0xF. Neither is being used in
  9180. ++ * practice so no immediate action is necessary. Nevertheless
  9181. ++ * HTT may need an abstraction layer like WMI has one day.
  9182. ++ */
  9183. ++ break;
  9184. + default:
  9185. +- ath10k_dbg(ATH10K_DBG_HTT, "htt event (%d) not handled\n",
  9186. +- resp->hdr.msg_type);
  9187. +- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
  9188. ++ ath10k_warn(ar, "htt event (%d) not handled\n",
  9189. ++ resp->hdr.msg_type);
  9190. ++ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
  9191. + skb->data, skb->len);
  9192. + break;
  9193. + };
  9194. +@@ -1512,6 +2039,7 @@ void ath10k_htt_t2h_msg_handler(struct a
  9195. + static void ath10k_htt_txrx_compl_task(unsigned long ptr)
  9196. + {
  9197. + struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
  9198. ++ struct ath10k *ar = htt->ar;
  9199. + struct htt_resp *resp;
  9200. + struct sk_buff *skb;
  9201. +
  9202. +@@ -1528,5 +2056,10 @@ static void ath10k_htt_txrx_compl_task(u
  9203. + ath10k_htt_rx_handler(htt, &resp->rx_ind);
  9204. + dev_kfree_skb_any(skb);
  9205. + }
  9206. ++
  9207. ++ while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) {
  9208. ++ ath10k_htt_rx_in_ord_ind(ar, skb);
  9209. ++ dev_kfree_skb_any(skb);
  9210. ++ }
  9211. + spin_unlock_bh(&htt->rx_ring.lock);
  9212. + }
  9213. +--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
  9214. ++++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
  9215. +@@ -56,98 +56,74 @@ exit:
  9216. + return ret;
  9217. + }
  9218. +
  9219. +-int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt)
  9220. ++int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
  9221. + {
  9222. +- int msdu_id;
  9223. ++ struct ath10k *ar = htt->ar;
  9224. ++ int ret;
  9225. +
  9226. + lockdep_assert_held(&htt->tx_lock);
  9227. +
  9228. +- msdu_id = find_first_zero_bit(htt->used_msdu_ids,
  9229. +- htt->max_num_pending_tx);
  9230. +- if (msdu_id == htt->max_num_pending_tx)
  9231. +- return -ENOBUFS;
  9232. +-
  9233. +- ath10k_dbg(ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id);
  9234. +- __set_bit(msdu_id, htt->used_msdu_ids);
  9235. +- return msdu_id;
  9236. ++ ret = idr_alloc(&htt->pending_tx, skb, 0, 0x10000, GFP_ATOMIC);
  9237. ++
  9238. ++ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);
  9239. ++
  9240. ++ return ret;
  9241. + }
  9242. +
  9243. + void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
  9244. + {
  9245. ++ struct ath10k *ar = htt->ar;
  9246. ++
  9247. + lockdep_assert_held(&htt->tx_lock);
  9248. +
  9249. +- if (!test_bit(msdu_id, htt->used_msdu_ids))
  9250. +- ath10k_warn("trying to free unallocated msdu_id %d\n", msdu_id);
  9251. ++ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
  9252. +
  9253. +- ath10k_dbg(ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
  9254. +- __clear_bit(msdu_id, htt->used_msdu_ids);
  9255. ++ idr_remove(&htt->pending_tx, msdu_id);
  9256. + }
  9257. +
  9258. +-int ath10k_htt_tx_attach(struct ath10k_htt *htt)
  9259. ++int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
  9260. + {
  9261. +- spin_lock_init(&htt->tx_lock);
  9262. +- init_waitqueue_head(&htt->empty_tx_wq);
  9263. +-
  9264. +- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, htt->ar->fw_features))
  9265. +- htt->max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
  9266. +- else
  9267. +- htt->max_num_pending_tx = TARGET_NUM_MSDU_DESC;
  9268. ++ struct ath10k *ar = htt->ar;
  9269. +
  9270. +- ath10k_dbg(ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
  9271. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
  9272. + htt->max_num_pending_tx);
  9273. +
  9274. +- htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
  9275. +- htt->max_num_pending_tx, GFP_KERNEL);
  9276. +- if (!htt->pending_tx)
  9277. +- return -ENOMEM;
  9278. +-
  9279. +- htt->used_msdu_ids = kzalloc(sizeof(unsigned long) *
  9280. +- BITS_TO_LONGS(htt->max_num_pending_tx),
  9281. +- GFP_KERNEL);
  9282. +- if (!htt->used_msdu_ids) {
  9283. +- kfree(htt->pending_tx);
  9284. +- return -ENOMEM;
  9285. +- }
  9286. ++ spin_lock_init(&htt->tx_lock);
  9287. ++ idr_init(&htt->pending_tx);
  9288. +
  9289. + htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
  9290. + sizeof(struct ath10k_htt_txbuf), 4, 0);
  9291. + if (!htt->tx_pool) {
  9292. +- kfree(htt->used_msdu_ids);
  9293. +- kfree(htt->pending_tx);
  9294. ++ idr_destroy(&htt->pending_tx);
  9295. + return -ENOMEM;
  9296. + }
  9297. +
  9298. + return 0;
  9299. + }
  9300. +
  9301. +-static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
  9302. ++static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
  9303. + {
  9304. ++ struct ath10k *ar = ctx;
  9305. ++ struct ath10k_htt *htt = &ar->htt;
  9306. + struct htt_tx_done tx_done = {0};
  9307. +- int msdu_id;
  9308. +-
  9309. +- spin_lock_bh(&htt->tx_lock);
  9310. +- for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
  9311. +- if (!test_bit(msdu_id, htt->used_msdu_ids))
  9312. +- continue;
  9313. +
  9314. +- ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
  9315. +- msdu_id);
  9316. ++ ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);
  9317. +
  9318. +- tx_done.discard = 1;
  9319. +- tx_done.msdu_id = msdu_id;
  9320. ++ tx_done.discard = 1;
  9321. ++ tx_done.msdu_id = msdu_id;
  9322. +
  9323. +- ath10k_txrx_tx_unref(htt, &tx_done);
  9324. +- }
  9325. ++ spin_lock_bh(&htt->tx_lock);
  9326. ++ ath10k_txrx_tx_unref(htt, &tx_done);
  9327. + spin_unlock_bh(&htt->tx_lock);
  9328. ++
  9329. ++ return 0;
  9330. + }
  9331. +
  9332. +-void ath10k_htt_tx_detach(struct ath10k_htt *htt)
  9333. ++void ath10k_htt_tx_free(struct ath10k_htt *htt)
  9334. + {
  9335. +- ath10k_htt_tx_cleanup_pending(htt);
  9336. +- kfree(htt->pending_tx);
  9337. +- kfree(htt->used_msdu_ids);
  9338. ++ idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
  9339. ++ idr_destroy(&htt->pending_tx);
  9340. + dma_pool_destroy(htt->tx_pool);
  9341. +- return;
  9342. + }
  9343. +
  9344. + void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
  9345. +@@ -157,6 +133,7 @@ void ath10k_htt_htc_tx_complete(struct a
  9346. +
  9347. + int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
  9348. + {
  9349. ++ struct ath10k *ar = htt->ar;
  9350. + struct sk_buff *skb;
  9351. + struct htt_cmd *cmd;
  9352. + int len = 0;
  9353. +@@ -165,7 +142,7 @@ int ath10k_htt_h2t_ver_req_msg(struct at
  9354. + len += sizeof(cmd->hdr);
  9355. + len += sizeof(cmd->ver_req);
  9356. +
  9357. +- skb = ath10k_htc_alloc_skb(len);
  9358. ++ skb = ath10k_htc_alloc_skb(ar, len);
  9359. + if (!skb)
  9360. + return -ENOMEM;
  9361. +
  9362. +@@ -184,6 +161,7 @@ int ath10k_htt_h2t_ver_req_msg(struct at
  9363. +
  9364. + int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
  9365. + {
  9366. ++ struct ath10k *ar = htt->ar;
  9367. + struct htt_stats_req *req;
  9368. + struct sk_buff *skb;
  9369. + struct htt_cmd *cmd;
  9370. +@@ -192,7 +170,7 @@ int ath10k_htt_h2t_stats_req(struct ath1
  9371. + len += sizeof(cmd->hdr);
  9372. + len += sizeof(cmd->stats_req);
  9373. +
  9374. +- skb = ath10k_htc_alloc_skb(len);
  9375. ++ skb = ath10k_htc_alloc_skb(ar, len);
  9376. + if (!skb)
  9377. + return -ENOMEM;
  9378. +
  9379. +@@ -214,7 +192,8 @@ int ath10k_htt_h2t_stats_req(struct ath1
  9380. +
  9381. + ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
  9382. + if (ret) {
  9383. +- ath10k_warn("failed to send htt type stats request: %d", ret);
  9384. ++ ath10k_warn(ar, "failed to send htt type stats request: %d",
  9385. ++ ret);
  9386. + dev_kfree_skb_any(skb);
  9387. + return ret;
  9388. + }
  9389. +@@ -224,6 +203,7 @@ int ath10k_htt_h2t_stats_req(struct ath1
  9390. +
  9391. + int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
  9392. + {
  9393. ++ struct ath10k *ar = htt->ar;
  9394. + struct sk_buff *skb;
  9395. + struct htt_cmd *cmd;
  9396. + struct htt_rx_ring_setup_ring *ring;
  9397. +@@ -242,7 +222,7 @@ int ath10k_htt_send_rx_ring_cfg_ll(struc
  9398. +
  9399. + len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
  9400. + + (sizeof(*ring) * num_rx_ring);
  9401. +- skb = ath10k_htc_alloc_skb(len);
  9402. ++ skb = ath10k_htc_alloc_skb(ar, len);
  9403. + if (!skb)
  9404. + return -ENOMEM;
  9405. +
  9406. +@@ -307,9 +287,57 @@ int ath10k_htt_send_rx_ring_cfg_ll(struc
  9407. + return 0;
  9408. + }
  9409. +
  9410. ++int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
  9411. ++ u8 max_subfrms_ampdu,
  9412. ++ u8 max_subfrms_amsdu)
  9413. ++{
  9414. ++ struct ath10k *ar = htt->ar;
  9415. ++ struct htt_aggr_conf *aggr_conf;
  9416. ++ struct sk_buff *skb;
  9417. ++ struct htt_cmd *cmd;
  9418. ++ int len;
  9419. ++ int ret;
  9420. ++
  9421. ++ /* Firmware defaults are: amsdu = 3 and ampdu = 64 */
  9422. ++
  9423. ++ if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
  9424. ++ return -EINVAL;
  9425. ++
  9426. ++ if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
  9427. ++ return -EINVAL;
  9428. ++
  9429. ++ len = sizeof(cmd->hdr);
  9430. ++ len += sizeof(cmd->aggr_conf);
  9431. ++
  9432. ++ skb = ath10k_htc_alloc_skb(ar, len);
  9433. ++ if (!skb)
  9434. ++ return -ENOMEM;
  9435. ++
  9436. ++ skb_put(skb, len);
  9437. ++ cmd = (struct htt_cmd *)skb->data;
  9438. ++ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;
  9439. ++
  9440. ++ aggr_conf = &cmd->aggr_conf;
  9441. ++ aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
  9442. ++ aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;
  9443. ++
  9444. ++ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
  9445. ++ aggr_conf->max_num_amsdu_subframes,
  9446. ++ aggr_conf->max_num_ampdu_subframes);
  9447. ++
  9448. ++ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
  9449. ++ if (ret) {
  9450. ++ dev_kfree_skb_any(skb);
  9451. ++ return ret;
  9452. ++ }
  9453. ++
  9454. ++ return 0;
  9455. ++}
  9456. ++
  9457. + int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
  9458. + {
  9459. +- struct device *dev = htt->ar->dev;
  9460. ++ struct ath10k *ar = htt->ar;
  9461. ++ struct device *dev = ar->dev;
  9462. + struct sk_buff *txdesc = NULL;
  9463. + struct htt_cmd *cmd;
  9464. + struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
  9465. +@@ -318,7 +346,6 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt
  9466. + int msdu_id = -1;
  9467. + int res;
  9468. +
  9469. +-
  9470. + res = ath10k_htt_tx_inc_pending(htt);
  9471. + if (res)
  9472. + goto err;
  9473. +@@ -327,16 +354,15 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt
  9474. + len += sizeof(cmd->mgmt_tx);
  9475. +
  9476. + spin_lock_bh(&htt->tx_lock);
  9477. +- res = ath10k_htt_tx_alloc_msdu_id(htt);
  9478. ++ res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
  9479. + if (res < 0) {
  9480. + spin_unlock_bh(&htt->tx_lock);
  9481. + goto err_tx_dec;
  9482. + }
  9483. + msdu_id = res;
  9484. +- htt->pending_tx[msdu_id] = msdu;
  9485. + spin_unlock_bh(&htt->tx_lock);
  9486. +
  9487. +- txdesc = ath10k_htc_alloc_skb(len);
  9488. ++ txdesc = ath10k_htc_alloc_skb(ar, len);
  9489. + if (!txdesc) {
  9490. + res = -ENOMEM;
  9491. + goto err_free_msdu_id;
  9492. +@@ -372,7 +398,6 @@ err_free_txdesc:
  9493. + dev_kfree_skb_any(txdesc);
  9494. + err_free_msdu_id:
  9495. + spin_lock_bh(&htt->tx_lock);
  9496. +- htt->pending_tx[msdu_id] = NULL;
  9497. + ath10k_htt_tx_free_msdu_id(htt, msdu_id);
  9498. + spin_unlock_bh(&htt->tx_lock);
  9499. + err_tx_dec:
  9500. +@@ -383,7 +408,8 @@ err:
  9501. +
  9502. + int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
  9503. + {
  9504. +- struct device *dev = htt->ar->dev;
  9505. ++ struct ath10k *ar = htt->ar;
  9506. ++ struct device *dev = ar->dev;
  9507. + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
  9508. + struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
  9509. + struct ath10k_hif_sg_item sg_items[2];
  9510. +@@ -403,13 +429,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt
  9511. + goto err;
  9512. +
  9513. + spin_lock_bh(&htt->tx_lock);
  9514. +- res = ath10k_htt_tx_alloc_msdu_id(htt);
  9515. ++ res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
  9516. + if (res < 0) {
  9517. + spin_unlock_bh(&htt->tx_lock);
  9518. + goto err_tx_dec;
  9519. + }
  9520. + msdu_id = res;
  9521. +- htt->pending_tx[msdu_id] = msdu;
  9522. + spin_unlock_bh(&htt->tx_lock);
  9523. +
  9524. + prefetch_len = min(htt->prefetch_len, msdu->len);
  9525. +@@ -423,10 +448,18 @@ int ath10k_htt_tx(struct ath10k_htt *htt
  9526. +
  9527. + skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
  9528. + &paddr);
  9529. +- if (!skb_cb->htt.txbuf)
  9530. ++ if (!skb_cb->htt.txbuf) {
  9531. ++ res = -ENOMEM;
  9532. + goto err_free_msdu_id;
  9533. ++ }
  9534. + skb_cb->htt.txbuf_paddr = paddr;
  9535. +
  9536. ++ if ((ieee80211_is_action(hdr->frame_control) ||
  9537. ++ ieee80211_is_deauth(hdr->frame_control) ||
  9538. ++ ieee80211_is_disassoc(hdr->frame_control)) &&
  9539. ++ ieee80211_has_protected(hdr->frame_control))
  9540. ++ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
  9541. ++
  9542. + skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
  9543. + DMA_TO_DEVICE);
  9544. + res = dma_mapping_error(dev, skb_cb->paddr);
  9545. +@@ -482,8 +515,16 @@ int ath10k_htt_tx(struct ath10k_htt *htt
  9546. +
  9547. + flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
  9548. + flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
  9549. +- flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
  9550. +- flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
  9551. ++ if (msdu->ip_summed == CHECKSUM_PARTIAL) {
  9552. ++ flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
  9553. ++ flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
  9554. ++ }
  9555. ++
  9556. ++ /* Prevent firmware from sending up tx inspection requests. There's
  9557. ++ * nothing ath10k can do with frames requested for inspection so force
  9558. ++ * it to simply rely a regular tx completion with discard status.
  9559. ++ */
  9560. ++ flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
  9561. +
  9562. + skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
  9563. + skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
  9564. +@@ -491,14 +532,18 @@ int ath10k_htt_tx(struct ath10k_htt *htt
  9565. + skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
  9566. + skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
  9567. + skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
  9568. +- skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);
  9569. ++ skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID);
  9570. ++ skb_cb->htt.txbuf->cmd_tx.freq = __cpu_to_le16(skb_cb->htt.freq);
  9571. +
  9572. +- ath10k_dbg(ATH10K_DBG_HTT,
  9573. +- "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu\n",
  9574. ++ trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
  9575. ++ ath10k_dbg(ar, ATH10K_DBG_HTT,
  9576. ++ "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
  9577. + flags0, flags1, msdu->len, msdu_id, frags_paddr,
  9578. +- (u32)skb_cb->paddr, vdev_id, tid);
  9579. +- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
  9580. ++ (u32)skb_cb->paddr, vdev_id, tid, skb_cb->htt.freq);
  9581. ++ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
  9582. + msdu->data, msdu->len);
  9583. ++ trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
  9584. ++ trace_ath10k_tx_payload(ar, msdu->data, msdu->len);
  9585. +
  9586. + sg_items[0].transfer_id = 0;
  9587. + sg_items[0].transfer_context = NULL;
  9588. +@@ -531,7 +576,6 @@ err_free_txbuf:
  9589. + skb_cb->htt.txbuf_paddr);
  9590. + err_free_msdu_id:
  9591. + spin_lock_bh(&htt->tx_lock);
  9592. +- htt->pending_tx[msdu_id] = NULL;
  9593. + ath10k_htt_tx_free_msdu_id(htt, msdu_id);
  9594. + spin_unlock_bh(&htt->tx_lock);
  9595. + err_tx_dec:
  9596. +--- a/drivers/net/wireless/ath/ath10k/hw.h
  9597. ++++ b/drivers/net/wireless/ath/ath10k/hw.h
  9598. +@@ -20,24 +20,73 @@
  9599. +
  9600. + #include "targaddrs.h"
  9601. +
  9602. ++#define ATH10K_FW_DIR "ath10k"
  9603. ++
  9604. + /* QCA988X 1.0 definitions (unsupported) */
  9605. + #define QCA988X_HW_1_0_CHIP_ID_REV 0x0
  9606. +
  9607. + /* QCA988X 2.0 definitions */
  9608. + #define QCA988X_HW_2_0_VERSION 0x4100016c
  9609. + #define QCA988X_HW_2_0_CHIP_ID_REV 0x2
  9610. +-#define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0"
  9611. ++#define QCA988X_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA988X/hw2.0"
  9612. + #define QCA988X_HW_2_0_FW_FILE "firmware.bin"
  9613. +-#define QCA988X_HW_2_0_FW_2_FILE "firmware-2.bin"
  9614. + #define QCA988X_HW_2_0_OTP_FILE "otp.bin"
  9615. + #define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
  9616. + #define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
  9617. +
  9618. ++/* QCA6174 target BMI version signatures */
  9619. ++#define QCA6174_HW_1_0_VERSION 0x05000000
  9620. ++#define QCA6174_HW_1_1_VERSION 0x05000001
  9621. ++#define QCA6174_HW_1_3_VERSION 0x05000003
  9622. ++#define QCA6174_HW_2_1_VERSION 0x05010000
  9623. ++#define QCA6174_HW_3_0_VERSION 0x05020000
  9624. ++#define QCA6174_HW_3_2_VERSION 0x05030000
  9625. ++
  9626. ++enum qca6174_pci_rev {
  9627. ++ QCA6174_PCI_REV_1_1 = 0x11,
  9628. ++ QCA6174_PCI_REV_1_3 = 0x13,
  9629. ++ QCA6174_PCI_REV_2_0 = 0x20,
  9630. ++ QCA6174_PCI_REV_3_0 = 0x30,
  9631. ++};
  9632. ++
  9633. ++enum qca6174_chip_id_rev {
  9634. ++ QCA6174_HW_1_0_CHIP_ID_REV = 0,
  9635. ++ QCA6174_HW_1_1_CHIP_ID_REV = 1,
  9636. ++ QCA6174_HW_1_3_CHIP_ID_REV = 2,
  9637. ++ QCA6174_HW_2_1_CHIP_ID_REV = 4,
  9638. ++ QCA6174_HW_2_2_CHIP_ID_REV = 5,
  9639. ++ QCA6174_HW_3_0_CHIP_ID_REV = 8,
  9640. ++ QCA6174_HW_3_1_CHIP_ID_REV = 9,
  9641. ++ QCA6174_HW_3_2_CHIP_ID_REV = 10,
  9642. ++};
  9643. ++
  9644. ++#define QCA6174_HW_2_1_FW_DIR "ath10k/QCA6174/hw2.1"
  9645. ++#define QCA6174_HW_2_1_FW_FILE "firmware.bin"
  9646. ++#define QCA6174_HW_2_1_OTP_FILE "otp.bin"
  9647. ++#define QCA6174_HW_2_1_BOARD_DATA_FILE "board.bin"
  9648. ++#define QCA6174_HW_2_1_PATCH_LOAD_ADDR 0x1234
  9649. ++
  9650. ++#define QCA6174_HW_3_0_FW_DIR "ath10k/QCA6174/hw3.0"
  9651. ++#define QCA6174_HW_3_0_FW_FILE "firmware.bin"
  9652. ++#define QCA6174_HW_3_0_OTP_FILE "otp.bin"
  9653. ++#define QCA6174_HW_3_0_BOARD_DATA_FILE "board.bin"
  9654. ++#define QCA6174_HW_3_0_PATCH_LOAD_ADDR 0x1234
  9655. ++
  9656. + #define ATH10K_FW_API2_FILE "firmware-2.bin"
  9657. ++#define ATH10K_FW_API3_FILE "firmware-3.bin"
  9658. ++
  9659. ++/* added support for ATH10K_FW_IE_WMI_OP_VERSION */
  9660. ++#define ATH10K_FW_API4_FILE "firmware-4.bin"
  9661. ++
  9662. ++#define ATH10K_FW_UTF_FILE "utf.bin"
  9663. +
  9664. + /* includes also the null byte */
  9665. + #define ATH10K_FIRMWARE_MAGIC "QCA-ATH10K"
  9666. +
  9667. ++#define REG_DUMP_COUNT_QCA988X 60
  9668. ++
  9669. ++#define QCA988X_CAL_DATA_LEN 2116
  9670. ++
  9671. + struct ath10k_fw_ie {
  9672. + __le32 id;
  9673. + __le32 len;
  9674. +@@ -50,8 +99,57 @@ enum ath10k_fw_ie_type {
  9675. + ATH10K_FW_IE_FEATURES = 2,
  9676. + ATH10K_FW_IE_FW_IMAGE = 3,
  9677. + ATH10K_FW_IE_OTP_IMAGE = 4,
  9678. ++
  9679. ++ /* WMI "operations" interface version, 32 bit value. Supported from
  9680. ++ * FW API 4 and above.
  9681. ++ */
  9682. ++ ATH10K_FW_IE_WMI_OP_VERSION = 5,
  9683. ++};
  9684. ++
  9685. ++enum ath10k_fw_wmi_op_version {
  9686. ++ ATH10K_FW_WMI_OP_VERSION_UNSET = 0,
  9687. ++
  9688. ++ ATH10K_FW_WMI_OP_VERSION_MAIN = 1,
  9689. ++ ATH10K_FW_WMI_OP_VERSION_10_1 = 2,
  9690. ++ ATH10K_FW_WMI_OP_VERSION_10_2 = 3,
  9691. ++ ATH10K_FW_WMI_OP_VERSION_TLV = 4,
  9692. ++ ATH10K_FW_WMI_OP_VERSION_10_2_4 = 5,
  9693. ++
  9694. ++ /* keep last */
  9695. ++ ATH10K_FW_WMI_OP_VERSION_MAX,
  9696. ++};
  9697. ++
  9698. ++enum ath10k_hw_rev {
  9699. ++ ATH10K_HW_QCA988X,
  9700. ++ ATH10K_HW_QCA6174,
  9701. ++};
  9702. ++
  9703. ++struct ath10k_hw_regs {
  9704. ++ u32 rtc_state_cold_reset_mask;
  9705. ++ u32 rtc_soc_base_address;
  9706. ++ u32 rtc_wmac_base_address;
  9707. ++ u32 soc_core_base_address;
  9708. ++ u32 ce_wrapper_base_address;
  9709. ++ u32 ce0_base_address;
  9710. ++ u32 ce1_base_address;
  9711. ++ u32 ce2_base_address;
  9712. ++ u32 ce3_base_address;
  9713. ++ u32 ce4_base_address;
  9714. ++ u32 ce5_base_address;
  9715. ++ u32 ce6_base_address;
  9716. ++ u32 ce7_base_address;
  9717. ++ u32 soc_reset_control_si0_rst_mask;
  9718. ++ u32 soc_reset_control_ce_rst_mask;
  9719. ++ u32 soc_chip_id_address;
  9720. ++ u32 scratch_3_address;
  9721. + };
  9722. +
  9723. ++extern const struct ath10k_hw_regs qca988x_regs;
  9724. ++extern const struct ath10k_hw_regs qca6174_regs;
  9725. ++
  9726. ++#define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X)
  9727. ++#define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174)
  9728. ++
  9729. + /* Known pecularities:
  9730. + * - current FW doesn't support raw rx mode (last tested v599)
  9731. + * - current FW dumps upon raw tx mode (last tested v599)
  9732. +@@ -73,6 +171,15 @@ enum ath10k_mcast2ucast_mode {
  9733. + ATH10K_MCAST2UCAST_ENABLED = 1,
  9734. + };
  9735. +
  9736. ++struct ath10k_pktlog_hdr {
  9737. ++ __le16 flags;
  9738. ++ __le16 missed_cnt;
  9739. ++ __le16 log_type;
  9740. ++ __le16 size;
  9741. ++ __le32 timestamp;
  9742. ++ u8 payload[0];
  9743. ++} __packed;
  9744. ++
  9745. + /* Target specific defines for MAIN firmware */
  9746. + #define TARGET_NUM_VDEVS 8
  9747. + #define TARGET_NUM_PEER_AST 2
  9748. +@@ -80,11 +187,13 @@ enum ath10k_mcast2ucast_mode {
  9749. + #define TARGET_DMA_BURST_SIZE 0
  9750. + #define TARGET_MAC_AGGR_DELIM 0
  9751. + #define TARGET_AST_SKID_LIMIT 16
  9752. +-#define TARGET_NUM_PEERS 16
  9753. ++#define TARGET_NUM_STATIONS 16
  9754. ++#define TARGET_NUM_PEERS ((TARGET_NUM_STATIONS) + \
  9755. ++ (TARGET_NUM_VDEVS))
  9756. + #define TARGET_NUM_OFFLOAD_PEERS 0
  9757. + #define TARGET_NUM_OFFLOAD_REORDER_BUFS 0
  9758. + #define TARGET_NUM_PEER_KEYS 2
  9759. +-#define TARGET_NUM_TIDS (2 * ((TARGET_NUM_PEERS) + (TARGET_NUM_VDEVS)))
  9760. ++#define TARGET_NUM_TIDS ((TARGET_NUM_PEERS) * 2)
  9761. + #define TARGET_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
  9762. + #define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
  9763. + #define TARGET_RX_TIMEOUT_LO_PRI 100
  9764. +@@ -115,12 +224,15 @@ enum ath10k_mcast2ucast_mode {
  9765. + #define TARGET_10X_DMA_BURST_SIZE 0
  9766. + #define TARGET_10X_MAC_AGGR_DELIM 0
  9767. + #define TARGET_10X_AST_SKID_LIMIT 16
  9768. +-#define TARGET_10X_NUM_PEERS (128 + (TARGET_10X_NUM_VDEVS))
  9769. +-#define TARGET_10X_NUM_PEERS_MAX 128
  9770. ++#define TARGET_10X_NUM_STATIONS 128
  9771. ++#define TARGET_10X_NUM_PEERS ((TARGET_10X_NUM_STATIONS) + \
  9772. ++ (TARGET_10X_NUM_VDEVS))
  9773. + #define TARGET_10X_NUM_OFFLOAD_PEERS 0
  9774. + #define TARGET_10X_NUM_OFFLOAD_REORDER_BUFS 0
  9775. + #define TARGET_10X_NUM_PEER_KEYS 2
  9776. +-#define TARGET_10X_NUM_TIDS 256
  9777. ++#define TARGET_10X_NUM_TIDS_MAX 256
  9778. ++#define TARGET_10X_NUM_TIDS min((TARGET_10X_NUM_TIDS_MAX), \
  9779. ++ (TARGET_10X_NUM_PEERS) * 2)
  9780. + #define TARGET_10X_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
  9781. + #define TARGET_10X_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
  9782. + #define TARGET_10X_RX_TIMEOUT_LO_PRI 100
  9783. +@@ -140,6 +252,18 @@ enum ath10k_mcast2ucast_mode {
  9784. + #define TARGET_10X_NUM_MSDU_DESC (1024 + 400)
  9785. + #define TARGET_10X_MAX_FRAG_ENTRIES 0
  9786. +
  9787. ++/* 10.2 parameters */
  9788. ++#define TARGET_10_2_DMA_BURST_SIZE 1
  9789. ++
  9790. ++/* Target specific defines for WMI-TLV firmware */
  9791. ++#define TARGET_TLV_NUM_VDEVS 3
  9792. ++#define TARGET_TLV_NUM_STATIONS 32
  9793. ++#define TARGET_TLV_NUM_PEERS ((TARGET_TLV_NUM_STATIONS) + \
  9794. ++ (TARGET_TLV_NUM_VDEVS) + \
  9795. ++ 2)
  9796. ++#define TARGET_TLV_NUM_TIDS ((TARGET_TLV_NUM_PEERS) * 2)
  9797. ++#define TARGET_TLV_NUM_MSDU_DESC (1024 + 32)
  9798. ++
  9799. + /* Number of Copy Engines supported */
  9800. + #define CE_COUNT 8
  9801. +
  9802. +@@ -170,7 +294,7 @@ enum ath10k_mcast2ucast_mode {
  9803. + /* as of IP3.7.1 */
  9804. + #define RTC_STATE_V_ON 3
  9805. +
  9806. +-#define RTC_STATE_COLD_RESET_MASK 0x00000400
  9807. ++#define RTC_STATE_COLD_RESET_MASK ar->regs->rtc_state_cold_reset_mask
  9808. + #define RTC_STATE_V_LSB 0
  9809. + #define RTC_STATE_V_MASK 0x00000007
  9810. + #define RTC_STATE_ADDRESS 0x0000
  9811. +@@ -179,12 +303,12 @@ enum ath10k_mcast2ucast_mode {
  9812. + #define PCIE_SOC_WAKE_RESET 0x00000000
  9813. + #define SOC_GLOBAL_RESET_ADDRESS 0x0008
  9814. +
  9815. +-#define RTC_SOC_BASE_ADDRESS 0x00004000
  9816. +-#define RTC_WMAC_BASE_ADDRESS 0x00005000
  9817. ++#define RTC_SOC_BASE_ADDRESS ar->regs->rtc_soc_base_address
  9818. ++#define RTC_WMAC_BASE_ADDRESS ar->regs->rtc_wmac_base_address
  9819. + #define MAC_COEX_BASE_ADDRESS 0x00006000
  9820. + #define BT_COEX_BASE_ADDRESS 0x00007000
  9821. + #define SOC_PCIE_BASE_ADDRESS 0x00008000
  9822. +-#define SOC_CORE_BASE_ADDRESS 0x00009000
  9823. ++#define SOC_CORE_BASE_ADDRESS ar->regs->soc_core_base_address
  9824. + #define WLAN_UART_BASE_ADDRESS 0x0000c000
  9825. + #define WLAN_SI_BASE_ADDRESS 0x00010000
  9826. + #define WLAN_GPIO_BASE_ADDRESS 0x00014000
  9827. +@@ -193,23 +317,23 @@ enum ath10k_mcast2ucast_mode {
  9828. + #define EFUSE_BASE_ADDRESS 0x00030000
  9829. + #define FPGA_REG_BASE_ADDRESS 0x00039000
  9830. + #define WLAN_UART2_BASE_ADDRESS 0x00054c00
  9831. +-#define CE_WRAPPER_BASE_ADDRESS 0x00057000
  9832. +-#define CE0_BASE_ADDRESS 0x00057400
  9833. +-#define CE1_BASE_ADDRESS 0x00057800
  9834. +-#define CE2_BASE_ADDRESS 0x00057c00
  9835. +-#define CE3_BASE_ADDRESS 0x00058000
  9836. +-#define CE4_BASE_ADDRESS 0x00058400
  9837. +-#define CE5_BASE_ADDRESS 0x00058800
  9838. +-#define CE6_BASE_ADDRESS 0x00058c00
  9839. +-#define CE7_BASE_ADDRESS 0x00059000
  9840. ++#define CE_WRAPPER_BASE_ADDRESS ar->regs->ce_wrapper_base_address
  9841. ++#define CE0_BASE_ADDRESS ar->regs->ce0_base_address
  9842. ++#define CE1_BASE_ADDRESS ar->regs->ce1_base_address
  9843. ++#define CE2_BASE_ADDRESS ar->regs->ce2_base_address
  9844. ++#define CE3_BASE_ADDRESS ar->regs->ce3_base_address
  9845. ++#define CE4_BASE_ADDRESS ar->regs->ce4_base_address
  9846. ++#define CE5_BASE_ADDRESS ar->regs->ce5_base_address
  9847. ++#define CE6_BASE_ADDRESS ar->regs->ce6_base_address
  9848. ++#define CE7_BASE_ADDRESS ar->regs->ce7_base_address
  9849. + #define DBI_BASE_ADDRESS 0x00060000
  9850. + #define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x0006c000
  9851. + #define PCIE_LOCAL_BASE_ADDRESS 0x00080000
  9852. +
  9853. + #define SOC_RESET_CONTROL_ADDRESS 0x00000000
  9854. + #define SOC_RESET_CONTROL_OFFSET 0x00000000
  9855. +-#define SOC_RESET_CONTROL_SI0_RST_MASK 0x00000001
  9856. +-#define SOC_RESET_CONTROL_CE_RST_MASK 0x00040000
  9857. ++#define SOC_RESET_CONTROL_SI0_RST_MASK ar->regs->soc_reset_control_si0_rst_mask
  9858. ++#define SOC_RESET_CONTROL_CE_RST_MASK ar->regs->soc_reset_control_ce_rst_mask
  9859. + #define SOC_RESET_CONTROL_CPU_WARM_RST_MASK 0x00000040
  9860. + #define SOC_CPU_CLOCK_OFFSET 0x00000020
  9861. + #define SOC_CPU_CLOCK_STANDARD_LSB 0
  9862. +@@ -223,7 +347,7 @@ enum ath10k_mcast2ucast_mode {
  9863. + #define SOC_LF_TIMER_CONTROL0_ADDRESS 0x00000050
  9864. + #define SOC_LF_TIMER_CONTROL0_ENABLE_MASK 0x00000004
  9865. +
  9866. +-#define SOC_CHIP_ID_ADDRESS 0x000000ec
  9867. ++#define SOC_CHIP_ID_ADDRESS ar->regs->soc_chip_id_address
  9868. + #define SOC_CHIP_ID_REV_LSB 8
  9869. + #define SOC_CHIP_ID_REV_MASK 0x00000f00
  9870. +
  9871. +@@ -274,11 +398,12 @@ enum ath10k_mcast2ucast_mode {
  9872. + #define SI_RX_DATA1_OFFSET 0x00000014
  9873. +
  9874. + #define CORE_CTRL_CPU_INTR_MASK 0x00002000
  9875. ++#define CORE_CTRL_PCIE_REG_31_MASK 0x00000800
  9876. + #define CORE_CTRL_ADDRESS 0x0000
  9877. + #define PCIE_INTR_ENABLE_ADDRESS 0x0008
  9878. + #define PCIE_INTR_CAUSE_ADDRESS 0x000c
  9879. + #define PCIE_INTR_CLR_ADDRESS 0x0014
  9880. +-#define SCRATCH_3_ADDRESS 0x0030
  9881. ++#define SCRATCH_3_ADDRESS ar->regs->scratch_3_address
  9882. + #define CPU_INTR_ADDRESS 0x0010
  9883. +
  9884. + /* Firmware indications to the Host via SCRATCH_3 register. */
  9885. +--- a/drivers/net/wireless/ath/ath10k/mac.c
  9886. ++++ b/drivers/net/wireless/ath/ath10k/mac.c
  9887. +@@ -26,6 +26,9 @@
  9888. + #include "wmi.h"
  9889. + #include "htt.h"
  9890. + #include "txrx.h"
  9891. ++#include "testmode.h"
  9892. ++#include "wmi.h"
  9893. ++#include "wmi-ops.h"
  9894. +
  9895. + /**********/
  9896. + /* Crypto */
  9897. +@@ -34,8 +37,9 @@
  9898. + static int ath10k_send_key(struct ath10k_vif *arvif,
  9899. + struct ieee80211_key_conf *key,
  9900. + enum set_key_cmd cmd,
  9901. +- const u8 *macaddr)
  9902. ++ const u8 *macaddr, bool def_idx)
  9903. + {
  9904. ++ struct ath10k *ar = arvif->ar;
  9905. + struct wmi_vdev_install_key_arg arg = {
  9906. + .vdev_id = arvif->vdev_id,
  9907. + .key_idx = key->keyidx,
  9908. +@@ -54,7 +58,7 @@ static int ath10k_send_key(struct ath10k
  9909. + switch (key->cipher) {
  9910. + case WLAN_CIPHER_SUITE_CCMP:
  9911. + arg.key_cipher = WMI_CIPHER_AES_CCM;
  9912. +- key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
  9913. ++ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
  9914. + break;
  9915. + case WLAN_CIPHER_SUITE_TKIP:
  9916. + arg.key_cipher = WMI_CIPHER_TKIP;
  9917. +@@ -68,9 +72,12 @@ static int ath10k_send_key(struct ath10k
  9918. + * Otherwise pairwise key must be set */
  9919. + if (memcmp(macaddr, arvif->vif->addr, ETH_ALEN))
  9920. + arg.key_flags = WMI_KEY_PAIRWISE;
  9921. ++
  9922. ++ if (def_idx)
  9923. ++ arg.key_flags |= WMI_KEY_TX_USAGE;
  9924. + break;
  9925. + default:
  9926. +- ath10k_warn("cipher %d is not supported\n", key->cipher);
  9927. ++ ath10k_warn(ar, "cipher %d is not supported\n", key->cipher);
  9928. + return -EOPNOTSUPP;
  9929. + }
  9930. +
  9931. +@@ -85,7 +92,7 @@ static int ath10k_send_key(struct ath10k
  9932. + static int ath10k_install_key(struct ath10k_vif *arvif,
  9933. + struct ieee80211_key_conf *key,
  9934. + enum set_key_cmd cmd,
  9935. +- const u8 *macaddr)
  9936. ++ const u8 *macaddr, bool def_idx)
  9937. + {
  9938. + struct ath10k *ar = arvif->ar;
  9939. + int ret;
  9940. +@@ -94,7 +101,7 @@ static int ath10k_install_key(struct ath
  9941. +
  9942. + reinit_completion(&ar->install_key_done);
  9943. +
  9944. +- ret = ath10k_send_key(arvif, key, cmd, macaddr);
  9945. ++ ret = ath10k_send_key(arvif, key, cmd, macaddr, def_idx);
  9946. + if (ret)
  9947. + return ret;
  9948. +
  9949. +@@ -112,6 +119,7 @@ static int ath10k_install_peer_wep_keys(
  9950. + struct ath10k_peer *peer;
  9951. + int ret;
  9952. + int i;
  9953. ++ bool def_idx;
  9954. +
  9955. + lockdep_assert_held(&ar->conf_mutex);
  9956. +
  9957. +@@ -125,13 +133,20 @@ static int ath10k_install_peer_wep_keys(
  9958. + for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
  9959. + if (arvif->wep_keys[i] == NULL)
  9960. + continue;
  9961. ++ /* set TX_USAGE flag for default key id */
  9962. ++ if (arvif->def_wep_key_idx == i)
  9963. ++ def_idx = true;
  9964. ++ else
  9965. ++ def_idx = false;
  9966. +
  9967. + ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
  9968. +- addr);
  9969. ++ addr, def_idx);
  9970. + if (ret)
  9971. + return ret;
  9972. +
  9973. ++ spin_lock_bh(&ar->data_lock);
  9974. + peer->keys[i] = arvif->wep_keys[i];
  9975. ++ spin_unlock_bh(&ar->data_lock);
  9976. + }
  9977. +
  9978. + return 0;
  9979. +@@ -159,21 +174,49 @@ static int ath10k_clear_peer_keys(struct
  9980. + if (peer->keys[i] == NULL)
  9981. + continue;
  9982. +
  9983. ++ /* key flags are not required to delete the key */
  9984. + ret = ath10k_install_key(arvif, peer->keys[i],
  9985. +- DISABLE_KEY, addr);
  9986. ++ DISABLE_KEY, addr, false);
  9987. + if (ret && first_errno == 0)
  9988. + first_errno = ret;
  9989. +
  9990. + if (ret)
  9991. +- ath10k_warn("failed to remove peer wep key %d: %d\n",
  9992. ++ ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
  9993. + i, ret);
  9994. +
  9995. ++ spin_lock_bh(&ar->data_lock);
  9996. + peer->keys[i] = NULL;
  9997. ++ spin_unlock_bh(&ar->data_lock);
  9998. + }
  9999. +
  10000. + return first_errno;
  10001. + }
  10002. +
  10003. ++bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
  10004. ++ u8 keyidx)
  10005. ++{
  10006. ++ struct ath10k_peer *peer;
  10007. ++ int i;
  10008. ++
  10009. ++ lockdep_assert_held(&ar->data_lock);
  10010. ++
  10011. ++ /* We don't know which vdev this peer belongs to,
  10012. ++ * since WMI doesn't give us that information.
  10013. ++ *
  10014. ++ * FIXME: multi-bss needs to be handled.
  10015. ++ */
  10016. ++ peer = ath10k_peer_find(ar, 0, addr);
  10017. ++ if (!peer)
  10018. ++ return false;
  10019. ++
  10020. ++ for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
  10021. ++ if (peer->keys[i] && peer->keys[i]->keyidx == keyidx)
  10022. ++ return true;
  10023. ++ }
  10024. ++
  10025. ++ return false;
  10026. ++}
  10027. ++
  10028. + static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
  10029. + struct ieee80211_key_conf *key)
  10030. + {
  10031. +@@ -194,7 +237,7 @@ static int ath10k_clear_vdev_key(struct
  10032. + list_for_each_entry(peer, &ar->peers, list) {
  10033. + for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
  10034. + if (peer->keys[i] == key) {
  10035. +- memcpy(addr, peer->addr, ETH_ALEN);
  10036. ++ ether_addr_copy(addr, peer->addr);
  10037. + peer->keys[i] = NULL;
  10038. + break;
  10039. + }
  10040. +@@ -207,20 +250,19 @@ static int ath10k_clear_vdev_key(struct
  10041. +
  10042. + if (i == ARRAY_SIZE(peer->keys))
  10043. + break;
  10044. +-
  10045. +- ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr);
  10046. ++ /* key flags are not required to delete the key */
  10047. ++ ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, false);
  10048. + if (ret && first_errno == 0)
  10049. + first_errno = ret;
  10050. +
  10051. + if (ret)
  10052. +- ath10k_warn("failed to remove key for %pM: %d\n",
  10053. ++ ath10k_warn(ar, "failed to remove key for %pM: %d\n",
  10054. + addr, ret);
  10055. + }
  10056. +
  10057. + return first_errno;
  10058. + }
  10059. +
  10060. +-
  10061. + /*********************/
  10062. + /* General utilities */
  10063. + /*********************/
  10064. +@@ -234,7 +276,10 @@ chan_to_phymode(const struct cfg80211_ch
  10065. + case IEEE80211_BAND_2GHZ:
  10066. + switch (chandef->width) {
  10067. + case NL80211_CHAN_WIDTH_20_NOHT:
  10068. +- phymode = MODE_11G;
  10069. ++ if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM)
  10070. ++ phymode = MODE_11B;
  10071. ++ else
  10072. ++ phymode = MODE_11G;
  10073. + break;
  10074. + case NL80211_CHAN_WIDTH_20:
  10075. + phymode = MODE_11NG_HT20;
  10076. +@@ -322,22 +367,24 @@ static int ath10k_peer_create(struct ath
  10077. +
  10078. + lockdep_assert_held(&ar->conf_mutex);
  10079. +
  10080. ++ if (ar->num_peers >= ar->max_num_peers)
  10081. ++ return -ENOBUFS;
  10082. ++
  10083. + ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
  10084. + if (ret) {
  10085. +- ath10k_warn("failed to create wmi peer %pM on vdev %i: %i\n",
  10086. ++ ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n",
  10087. + addr, vdev_id, ret);
  10088. + return ret;
  10089. + }
  10090. +
  10091. + ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
  10092. + if (ret) {
  10093. +- ath10k_warn("failed to wait for created wmi peer %pM on vdev %i: %i\n",
  10094. ++ ath10k_warn(ar, "failed to wait for created wmi peer %pM on vdev %i: %i\n",
  10095. + addr, vdev_id, ret);
  10096. + return ret;
  10097. + }
  10098. +- spin_lock_bh(&ar->data_lock);
  10099. ++
  10100. + ar->num_peers++;
  10101. +- spin_unlock_bh(&ar->data_lock);
  10102. +
  10103. + return 0;
  10104. + }
  10105. +@@ -352,7 +399,7 @@ static int ath10k_mac_set_kickout(struct
  10106. + ret = ath10k_wmi_pdev_set_param(ar, param,
  10107. + ATH10K_KICKOUT_THRESHOLD);
  10108. + if (ret) {
  10109. +- ath10k_warn("failed to set kickout threshold on vdev %i: %d\n",
  10110. ++ ath10k_warn(ar, "failed to set kickout threshold on vdev %i: %d\n",
  10111. + arvif->vdev_id, ret);
  10112. + return ret;
  10113. + }
  10114. +@@ -361,7 +408,7 @@ static int ath10k_mac_set_kickout(struct
  10115. + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
  10116. + ATH10K_KEEPALIVE_MIN_IDLE);
  10117. + if (ret) {
  10118. +- ath10k_warn("failed to set keepalive minimum idle time on vdev %i: %d\n",
  10119. ++ ath10k_warn(ar, "failed to set keepalive minimum idle time on vdev %i: %d\n",
  10120. + arvif->vdev_id, ret);
  10121. + return ret;
  10122. + }
  10123. +@@ -370,7 +417,7 @@ static int ath10k_mac_set_kickout(struct
  10124. + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
  10125. + ATH10K_KEEPALIVE_MAX_IDLE);
  10126. + if (ret) {
  10127. +- ath10k_warn("failed to set keepalive maximum idle time on vdev %i: %d\n",
  10128. ++ ath10k_warn(ar, "failed to set keepalive maximum idle time on vdev %i: %d\n",
  10129. + arvif->vdev_id, ret);
  10130. + return ret;
  10131. + }
  10132. +@@ -379,7 +426,7 @@ static int ath10k_mac_set_kickout(struct
  10133. + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
  10134. + ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
  10135. + if (ret) {
  10136. +- ath10k_warn("failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
  10137. ++ ath10k_warn(ar, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
  10138. + arvif->vdev_id, ret);
  10139. + return ret;
  10140. + }
  10141. +@@ -387,15 +434,11 @@ static int ath10k_mac_set_kickout(struct
  10142. + return 0;
  10143. + }
  10144. +
  10145. +-static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
  10146. ++static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
  10147. + {
  10148. + struct ath10k *ar = arvif->ar;
  10149. + u32 vdev_param;
  10150. +
  10151. +- if (value != 0xFFFFFFFF)
  10152. +- value = min_t(u32, arvif->ar->hw->wiphy->rts_threshold,
  10153. +- ATH10K_RTS_MAX);
  10154. +-
  10155. + vdev_param = ar->wmi.vdev_param->rts_threshold;
  10156. + return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
  10157. + }
  10158. +@@ -428,9 +471,7 @@ static int ath10k_peer_delete(struct ath
  10159. + if (ret)
  10160. + return ret;
  10161. +
  10162. +- spin_lock_bh(&ar->data_lock);
  10163. + ar->num_peers--;
  10164. +- spin_unlock_bh(&ar->data_lock);
  10165. +
  10166. + return 0;
  10167. + }
  10168. +@@ -446,7 +487,7 @@ static void ath10k_peer_cleanup(struct a
  10169. + if (peer->vdev_id != vdev_id)
  10170. + continue;
  10171. +
  10172. +- ath10k_warn("removing stale peer %pM from vdev_id %d\n",
  10173. ++ ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n",
  10174. + peer->addr, vdev_id);
  10175. +
  10176. + list_del(&peer->list);
  10177. +@@ -467,20 +508,63 @@ static void ath10k_peer_cleanup_all(stru
  10178. + list_del(&peer->list);
  10179. + kfree(peer);
  10180. + }
  10181. +- ar->num_peers = 0;
  10182. + spin_unlock_bh(&ar->data_lock);
  10183. ++
  10184. ++ ar->num_peers = 0;
  10185. ++ ar->num_stations = 0;
  10186. + }
  10187. +
  10188. + /************************/
  10189. + /* Interface management */
  10190. + /************************/
  10191. +
  10192. ++void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif)
  10193. ++{
  10194. ++ struct ath10k *ar = arvif->ar;
  10195. ++
  10196. ++ lockdep_assert_held(&ar->data_lock);
  10197. ++
  10198. ++ if (!arvif->beacon)
  10199. ++ return;
  10200. ++
  10201. ++ if (!arvif->beacon_buf)
  10202. ++ dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr,
  10203. ++ arvif->beacon->len, DMA_TO_DEVICE);
  10204. ++
  10205. ++ if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED &&
  10206. ++ arvif->beacon_state != ATH10K_BEACON_SENT))
  10207. ++ return;
  10208. ++
  10209. ++ dev_kfree_skb_any(arvif->beacon);
  10210. ++
  10211. ++ arvif->beacon = NULL;
  10212. ++ arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
  10213. ++}
  10214. ++
  10215. ++static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
  10216. ++{
  10217. ++ struct ath10k *ar = arvif->ar;
  10218. ++
  10219. ++ lockdep_assert_held(&ar->data_lock);
  10220. ++
  10221. ++ ath10k_mac_vif_beacon_free(arvif);
  10222. ++
  10223. ++ if (arvif->beacon_buf) {
  10224. ++ dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
  10225. ++ arvif->beacon_buf, arvif->beacon_paddr);
  10226. ++ arvif->beacon_buf = NULL;
  10227. ++ }
  10228. ++}
  10229. ++
  10230. + static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
  10231. + {
  10232. + int ret;
  10233. +
  10234. + lockdep_assert_held(&ar->conf_mutex);
  10235. +
  10236. ++ if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
  10237. ++ return -ESHUTDOWN;
  10238. ++
  10239. + ret = wait_for_completion_timeout(&ar->vdev_setup_done,
  10240. + ATH10K_VDEV_SETUP_TIMEOUT_HZ);
  10241. + if (ret == 0)
  10242. +@@ -489,19 +573,6 @@ static inline int ath10k_vdev_setup_sync
  10243. + return 0;
  10244. + }
  10245. +
  10246. +-static bool ath10k_monitor_is_enabled(struct ath10k *ar)
  10247. +-{
  10248. +- lockdep_assert_held(&ar->conf_mutex);
  10249. +-
  10250. +- ath10k_dbg(ATH10K_DBG_MAC,
  10251. +- "mac monitor refs: promisc %d monitor %d cac %d\n",
  10252. +- ar->promisc, ar->monitor,
  10253. +- test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags));
  10254. +-
  10255. +- return ar->promisc || ar->monitor ||
  10256. +- test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
  10257. +-}
  10258. +-
  10259. + static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
  10260. + {
  10261. + struct cfg80211_chan_def *chandef = &ar->chandef;
  10262. +@@ -526,37 +597,39 @@ static int ath10k_monitor_vdev_start(str
  10263. + arg.channel.max_reg_power = channel->max_reg_power * 2;
  10264. + arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;
  10265. +
  10266. ++ reinit_completion(&ar->vdev_setup_done);
  10267. ++
  10268. + ret = ath10k_wmi_vdev_start(ar, &arg);
  10269. + if (ret) {
  10270. +- ath10k_warn("failed to request monitor vdev %i start: %d\n",
  10271. ++ ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n",
  10272. + vdev_id, ret);
  10273. + return ret;
  10274. + }
  10275. +
  10276. + ret = ath10k_vdev_setup_sync(ar);
  10277. + if (ret) {
  10278. +- ath10k_warn("failed to synchronize setup for monitor vdev %i: %d\n",
  10279. ++ ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n",
  10280. + vdev_id, ret);
  10281. + return ret;
  10282. + }
  10283. +
  10284. + ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
  10285. + if (ret) {
  10286. +- ath10k_warn("failed to put up monitor vdev %i: %d\n",
  10287. ++ ath10k_warn(ar, "failed to put up monitor vdev %i: %d\n",
  10288. + vdev_id, ret);
  10289. + goto vdev_stop;
  10290. + }
  10291. +
  10292. + ar->monitor_vdev_id = vdev_id;
  10293. +
  10294. +- ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
  10295. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
  10296. + ar->monitor_vdev_id);
  10297. + return 0;
  10298. +
  10299. + vdev_stop:
  10300. + ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
  10301. + if (ret)
  10302. +- ath10k_warn("failed to stop monitor vdev %i after start failure: %d\n",
  10303. ++ ath10k_warn(ar, "failed to stop monitor vdev %i after start failure: %d\n",
  10304. + ar->monitor_vdev_id, ret);
  10305. +
  10306. + return ret;
  10307. +@@ -570,20 +643,22 @@ static int ath10k_monitor_vdev_stop(stru
  10308. +
  10309. + ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
  10310. + if (ret)
  10311. +- ath10k_warn("failed to put down monitor vdev %i: %d\n",
  10312. ++ ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n",
  10313. + ar->monitor_vdev_id, ret);
  10314. +
  10315. ++ reinit_completion(&ar->vdev_setup_done);
  10316. ++
  10317. + ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
  10318. + if (ret)
  10319. +- ath10k_warn("failed to to request monitor vdev %i stop: %d\n",
  10320. ++ ath10k_warn(ar, "failed to to request monitor vdev %i stop: %d\n",
  10321. + ar->monitor_vdev_id, ret);
  10322. +
  10323. + ret = ath10k_vdev_setup_sync(ar);
  10324. + if (ret)
  10325. +- ath10k_warn("failed to synchronise monitor vdev %i: %d\n",
  10326. ++ ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n",
  10327. + ar->monitor_vdev_id, ret);
  10328. +
  10329. +- ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
  10330. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
  10331. + ar->monitor_vdev_id);
  10332. + return ret;
  10333. + }
  10334. +@@ -594,35 +669,29 @@ static int ath10k_monitor_vdev_create(st
  10335. +
  10336. + lockdep_assert_held(&ar->conf_mutex);
  10337. +
  10338. +- bit = ffs(ar->free_vdev_map);
  10339. +- if (bit == 0) {
  10340. +- ath10k_warn("failed to find free vdev id for monitor vdev\n");
  10341. ++ if (ar->free_vdev_map == 0) {
  10342. ++ ath10k_warn(ar, "failed to find free vdev id for monitor vdev\n");
  10343. + return -ENOMEM;
  10344. + }
  10345. +
  10346. +- ar->monitor_vdev_id = bit - 1;
  10347. +- ar->free_vdev_map &= ~(1 << ar->monitor_vdev_id);
  10348. ++ bit = __ffs64(ar->free_vdev_map);
  10349. ++
  10350. ++ ar->monitor_vdev_id = bit;
  10351. +
  10352. + ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id,
  10353. + WMI_VDEV_TYPE_MONITOR,
  10354. + 0, ar->mac_addr);
  10355. + if (ret) {
  10356. +- ath10k_warn("failed to request monitor vdev %i creation: %d\n",
  10357. ++ ath10k_warn(ar, "failed to request monitor vdev %i creation: %d\n",
  10358. + ar->monitor_vdev_id, ret);
  10359. +- goto vdev_fail;
  10360. ++ return ret;
  10361. + }
  10362. +
  10363. +- ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
  10364. ++ ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
  10365. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
  10366. + ar->monitor_vdev_id);
  10367. +
  10368. + return 0;
  10369. +-
  10370. +-vdev_fail:
  10371. +- /*
  10372. +- * Restore the ID to the global map.
  10373. +- */
  10374. +- ar->free_vdev_map |= 1 << (ar->monitor_vdev_id);
  10375. +- return ret;
  10376. + }
  10377. +
  10378. + static int ath10k_monitor_vdev_delete(struct ath10k *ar)
  10379. +@@ -633,14 +702,14 @@ static int ath10k_monitor_vdev_delete(st
  10380. +
  10381. + ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
  10382. + if (ret) {
  10383. +- ath10k_warn("failed to request wmi monitor vdev %i removal: %d\n",
  10384. ++ ath10k_warn(ar, "failed to request wmi monitor vdev %i removal: %d\n",
  10385. + ar->monitor_vdev_id, ret);
  10386. + return ret;
  10387. + }
  10388. +
  10389. +- ar->free_vdev_map |= 1 << (ar->monitor_vdev_id);
  10390. ++ ar->free_vdev_map |= 1LL << ar->monitor_vdev_id;
  10391. +
  10392. +- ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
  10393. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
  10394. + ar->monitor_vdev_id);
  10395. + return ret;
  10396. + }
  10397. +@@ -651,63 +720,70 @@ static int ath10k_monitor_start(struct a
  10398. +
  10399. + lockdep_assert_held(&ar->conf_mutex);
  10400. +
  10401. +- if (!ath10k_monitor_is_enabled(ar)) {
  10402. +- ath10k_warn("trying to start monitor with no references\n");
  10403. +- return 0;
  10404. +- }
  10405. +-
  10406. +- if (ar->monitor_started) {
  10407. +- ath10k_dbg(ATH10K_DBG_MAC, "mac monitor already started\n");
  10408. +- return 0;
  10409. +- }
  10410. +-
  10411. + ret = ath10k_monitor_vdev_create(ar);
  10412. + if (ret) {
  10413. +- ath10k_warn("failed to create monitor vdev: %d\n", ret);
  10414. ++ ath10k_warn(ar, "failed to create monitor vdev: %d\n", ret);
  10415. + return ret;
  10416. + }
  10417. +
  10418. + ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
  10419. + if (ret) {
  10420. +- ath10k_warn("failed to start monitor vdev: %d\n", ret);
  10421. ++ ath10k_warn(ar, "failed to start monitor vdev: %d\n", ret);
  10422. + ath10k_monitor_vdev_delete(ar);
  10423. + return ret;
  10424. + }
  10425. +
  10426. + ar->monitor_started = true;
  10427. +- ath10k_dbg(ATH10K_DBG_MAC, "mac monitor started\n");
  10428. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n");
  10429. +
  10430. + return 0;
  10431. + }
  10432. +
  10433. +-static void ath10k_monitor_stop(struct ath10k *ar)
  10434. ++static int ath10k_monitor_stop(struct ath10k *ar)
  10435. + {
  10436. + int ret;
  10437. +
  10438. + lockdep_assert_held(&ar->conf_mutex);
  10439. +
  10440. +- if (ath10k_monitor_is_enabled(ar)) {
  10441. +- ath10k_dbg(ATH10K_DBG_MAC,
  10442. +- "mac monitor will be stopped later\n");
  10443. +- return;
  10444. ++ ret = ath10k_monitor_vdev_stop(ar);
  10445. ++ if (ret) {
  10446. ++ ath10k_warn(ar, "failed to stop monitor vdev: %d\n", ret);
  10447. ++ return ret;
  10448. + }
  10449. +
  10450. +- if (!ar->monitor_started) {
  10451. +- ath10k_dbg(ATH10K_DBG_MAC,
  10452. +- "mac monitor probably failed to start earlier\n");
  10453. +- return;
  10454. ++ ret = ath10k_monitor_vdev_delete(ar);
  10455. ++ if (ret) {
  10456. ++ ath10k_warn(ar, "failed to delete monitor vdev: %d\n", ret);
  10457. ++ return ret;
  10458. + }
  10459. +
  10460. +- ret = ath10k_monitor_vdev_stop(ar);
  10461. +- if (ret)
  10462. +- ath10k_warn("failed to stop monitor vdev: %d\n", ret);
  10463. ++ ar->monitor_started = false;
  10464. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n");
  10465. +
  10466. +- ret = ath10k_monitor_vdev_delete(ar);
  10467. +- if (ret)
  10468. +- ath10k_warn("failed to delete monitor vdev: %d\n", ret);
  10469. ++ return 0;
  10470. ++}
  10471. +
  10472. +- ar->monitor_started = false;
  10473. +- ath10k_dbg(ATH10K_DBG_MAC, "mac monitor stopped\n");
  10474. ++static int ath10k_monitor_recalc(struct ath10k *ar)
  10475. ++{
  10476. ++ bool should_start;
  10477. ++
  10478. ++ lockdep_assert_held(&ar->conf_mutex);
  10479. ++
  10480. ++ should_start = ar->monitor ||
  10481. ++ ar->filter_flags & FIF_PROMISC_IN_BSS ||
  10482. ++ test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
  10483. ++
  10484. ++ ath10k_dbg(ar, ATH10K_DBG_MAC,
  10485. ++ "mac monitor recalc started? %d should? %d\n",
  10486. ++ ar->monitor_started, should_start);
  10487. ++
  10488. ++ if (should_start == ar->monitor_started)
  10489. ++ return 0;
  10490. ++
  10491. ++ if (should_start)
  10492. ++ return ath10k_monitor_start(ar);
  10493. ++
  10494. ++ return ath10k_monitor_stop(ar);
  10495. + }
  10496. +
  10497. + static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
  10498. +@@ -738,14 +814,14 @@ static int ath10k_start_cac(struct ath10
  10499. +
  10500. + set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
  10501. +
  10502. +- ret = ath10k_monitor_start(ar);
  10503. ++ ret = ath10k_monitor_recalc(ar);
  10504. + if (ret) {
  10505. +- ath10k_warn("failed to start monitor (cac): %d\n", ret);
  10506. ++ ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret);
  10507. + clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
  10508. + return ret;
  10509. + }
  10510. +
  10511. +- ath10k_dbg(ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n",
  10512. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n",
  10513. + ar->monitor_vdev_id);
  10514. +
  10515. + return 0;
  10516. +@@ -762,7 +838,7 @@ static int ath10k_stop_cac(struct ath10k
  10517. + clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
  10518. + ath10k_monitor_stop(ar);
  10519. +
  10520. +- ath10k_dbg(ATH10K_DBG_MAC, "mac cac finished\n");
  10521. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n");
  10522. +
  10523. + return 0;
  10524. + }
  10525. +@@ -788,12 +864,12 @@ static void ath10k_recalc_radar_detectio
  10526. + * radiation is not allowed, make this channel DFS_UNAVAILABLE
  10527. + * by indicating that radar was detected.
  10528. + */
  10529. +- ath10k_warn("failed to start CAC: %d\n", ret);
  10530. ++ ath10k_warn(ar, "failed to start CAC: %d\n", ret);
  10531. + ieee80211_radar_detected(ar->hw);
  10532. + }
  10533. + }
  10534. +
  10535. +-static int ath10k_vdev_start(struct ath10k_vif *arvif)
  10536. ++static int ath10k_vdev_start_restart(struct ath10k_vif *arvif, bool restart)
  10537. + {
  10538. + struct ath10k *ar = arvif->ar;
  10539. + struct cfg80211_chan_def *chandef = &ar->chandef;
  10540. +@@ -830,22 +906,27 @@ static int ath10k_vdev_start(struct ath1
  10541. + arg.ssid_len = arvif->vif->bss_conf.ssid_len;
  10542. + }
  10543. +
  10544. +- ath10k_dbg(ATH10K_DBG_MAC,
  10545. ++ ath10k_dbg(ar, ATH10K_DBG_MAC,
  10546. + "mac vdev %d start center_freq %d phymode %s\n",
  10547. + arg.vdev_id, arg.channel.freq,
  10548. + ath10k_wmi_phymode_str(arg.channel.mode));
  10549. +
  10550. +- ret = ath10k_wmi_vdev_start(ar, &arg);
  10551. ++ if (restart)
  10552. ++ ret = ath10k_wmi_vdev_restart(ar, &arg);
  10553. ++ else
  10554. ++ ret = ath10k_wmi_vdev_start(ar, &arg);
  10555. ++
  10556. + if (ret) {
  10557. +- ath10k_warn("failed to start WMI vdev %i: %d\n",
  10558. ++ ath10k_warn(ar, "failed to start WMI vdev %i: %d\n",
  10559. + arg.vdev_id, ret);
  10560. + return ret;
  10561. + }
  10562. +
  10563. + ret = ath10k_vdev_setup_sync(ar);
  10564. + if (ret) {
  10565. +- ath10k_warn("failed to synchronise setup for vdev %i: %d\n",
  10566. +- arg.vdev_id, ret);
  10567. ++ ath10k_warn(ar,
  10568. ++ "failed to synchronize setup for vdev %i restart %d: %d\n",
  10569. ++ arg.vdev_id, restart, ret);
  10570. + return ret;
  10571. + }
  10572. +
  10573. +@@ -855,6 +936,16 @@ static int ath10k_vdev_start(struct ath1
  10574. + return ret;
  10575. + }
  10576. +
  10577. ++static int ath10k_vdev_start(struct ath10k_vif *arvif)
  10578. ++{
  10579. ++ return ath10k_vdev_start_restart(arvif, false);
  10580. ++}
  10581. ++
  10582. ++static int ath10k_vdev_restart(struct ath10k_vif *arvif)
  10583. ++{
  10584. ++ return ath10k_vdev_start_restart(arvif, true);
  10585. ++}
  10586. ++
  10587. + static int ath10k_vdev_stop(struct ath10k_vif *arvif)
  10588. + {
  10589. + struct ath10k *ar = arvif->ar;
  10590. +@@ -866,14 +957,14 @@ static int ath10k_vdev_stop(struct ath10
  10591. +
  10592. + ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
  10593. + if (ret) {
  10594. +- ath10k_warn("failed to stop WMI vdev %i: %d\n",
  10595. ++ ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n",
  10596. + arvif->vdev_id, ret);
  10597. + return ret;
  10598. + }
  10599. +
  10600. + ret = ath10k_vdev_setup_sync(ar);
  10601. + if (ret) {
  10602. +- ath10k_warn("failed to syncronise setup for vdev %i: %d\n",
  10603. ++ ath10k_warn(ar, "failed to synchronize setup for vdev %i stop: %d\n",
  10604. + arvif->vdev_id, ret);
  10605. + return ret;
  10606. + }
  10607. +@@ -888,9 +979,147 @@ static int ath10k_vdev_stop(struct ath10
  10608. + return ret;
  10609. + }
  10610. +
  10611. ++static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif,
  10612. ++ struct sk_buff *bcn)
  10613. ++{
  10614. ++ struct ath10k *ar = arvif->ar;
  10615. ++ struct ieee80211_mgmt *mgmt;
  10616. ++ const u8 *p2p_ie;
  10617. ++ int ret;
  10618. ++
  10619. ++ if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
  10620. ++ return 0;
  10621. ++
  10622. ++ if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
  10623. ++ return 0;
  10624. ++
  10625. ++ mgmt = (void *)bcn->data;
  10626. ++ p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
  10627. ++ mgmt->u.beacon.variable,
  10628. ++ bcn->len - (mgmt->u.beacon.variable -
  10629. ++ bcn->data));
  10630. ++ if (!p2p_ie)
  10631. ++ return -ENOENT;
  10632. ++
  10633. ++ ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie);
  10634. ++ if (ret) {
  10635. ++ ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n",
  10636. ++ arvif->vdev_id, ret);
  10637. ++ return ret;
  10638. ++ }
  10639. ++
  10640. ++ return 0;
  10641. ++}
  10642. ++
  10643. ++static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
  10644. ++ u8 oui_type, size_t ie_offset)
  10645. ++{
  10646. ++ size_t len;
  10647. ++ const u8 *next;
  10648. ++ const u8 *end;
  10649. ++ u8 *ie;
  10650. ++
  10651. ++ if (WARN_ON(skb->len < ie_offset))
  10652. ++ return -EINVAL;
  10653. ++
  10654. ++ ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
  10655. ++ skb->data + ie_offset,
  10656. ++ skb->len - ie_offset);
  10657. ++ if (!ie)
  10658. ++ return -ENOENT;
  10659. ++
  10660. ++ len = ie[1] + 2;
  10661. ++ end = skb->data + skb->len;
  10662. ++ next = ie + len;
  10663. ++
  10664. ++ if (WARN_ON(next > end))
  10665. ++ return -EINVAL;
  10666. ++
  10667. ++ memmove(ie, next, end - next);
  10668. ++ skb_trim(skb, skb->len - len);
  10669. ++
  10670. ++ return 0;
  10671. ++}
  10672. ++
  10673. ++static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif)
  10674. ++{
  10675. ++ struct ath10k *ar = arvif->ar;
  10676. ++ struct ieee80211_hw *hw = ar->hw;
  10677. ++ struct ieee80211_vif *vif = arvif->vif;
  10678. ++ struct ieee80211_mutable_offsets offs = {};
  10679. ++ struct sk_buff *bcn;
  10680. ++ int ret;
  10681. ++
  10682. ++ if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
  10683. ++ return 0;
  10684. ++
  10685. ++ bcn = ieee80211_beacon_get_template(hw, vif, &offs);
  10686. ++ if (!bcn) {
  10687. ++ ath10k_warn(ar, "failed to get beacon template from mac80211\n");
  10688. ++ return -EPERM;
  10689. ++ }
  10690. ++
  10691. ++ ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn);
  10692. ++ if (ret) {
  10693. ++ ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret);
  10694. ++ kfree_skb(bcn);
  10695. ++ return ret;
  10696. ++ }
  10697. ++
  10698. ++ /* P2P IE is inserted by firmware automatically (as configured above)
  10699. ++ * so remove it from the base beacon template to avoid duplicate P2P
  10700. ++ * IEs in beacon frames.
  10701. ++ */
  10702. ++ ath10k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
  10703. ++ offsetof(struct ieee80211_mgmt,
  10704. ++ u.beacon.variable));
  10705. ++
  10706. ++ ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0,
  10707. ++ 0, NULL, 0);
  10708. ++ kfree_skb(bcn);
  10709. ++
  10710. ++ if (ret) {
  10711. ++ ath10k_warn(ar, "failed to submit beacon template command: %d\n",
  10712. ++ ret);
  10713. ++ return ret;
  10714. ++ }
  10715. ++
  10716. ++ return 0;
  10717. ++}
  10718. ++
  10719. ++static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
  10720. ++{
  10721. ++ struct ath10k *ar = arvif->ar;
  10722. ++ struct ieee80211_hw *hw = ar->hw;
  10723. ++ struct ieee80211_vif *vif = arvif->vif;
  10724. ++ struct sk_buff *prb;
  10725. ++ int ret;
  10726. ++
  10727. ++ if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
  10728. ++ return 0;
  10729. ++
  10730. ++ prb = ieee80211_proberesp_get(hw, vif);
  10731. ++ if (!prb) {
  10732. ++ ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
  10733. ++ return -EPERM;
  10734. ++ }
  10735. ++
  10736. ++ ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb);
  10737. ++ kfree_skb(prb);
  10738. ++
  10739. ++ if (ret) {
  10740. ++ ath10k_warn(ar, "failed to submit probe resp template command: %d\n",
  10741. ++ ret);
  10742. ++ return ret;
  10743. ++ }
  10744. ++
  10745. ++ return 0;
  10746. ++}
  10747. ++
  10748. + static void ath10k_control_beaconing(struct ath10k_vif *arvif,
  10749. +- struct ieee80211_bss_conf *info)
  10750. ++ struct ieee80211_bss_conf *info)
  10751. + {
  10752. ++ struct ath10k *ar = arvif->ar;
  10753. + int ret = 0;
  10754. +
  10755. + lockdep_assert_held(&arvif->ar->conf_mutex);
  10756. +@@ -902,15 +1131,7 @@ static void ath10k_control_beaconing(str
  10757. + arvif->is_up = false;
  10758. +
  10759. + spin_lock_bh(&arvif->ar->data_lock);
  10760. +- if (arvif->beacon) {
  10761. +- dma_unmap_single(arvif->ar->dev,
  10762. +- ATH10K_SKB_CB(arvif->beacon)->paddr,
  10763. +- arvif->beacon->len, DMA_TO_DEVICE);
  10764. +- dev_kfree_skb_any(arvif->beacon);
  10765. +-
  10766. +- arvif->beacon = NULL;
  10767. +- arvif->beacon_sent = false;
  10768. +- }
  10769. ++ ath10k_mac_vif_beacon_free(arvif);
  10770. + spin_unlock_bh(&arvif->ar->data_lock);
  10771. +
  10772. + return;
  10773. +@@ -923,12 +1144,12 @@ static void ath10k_control_beaconing(str
  10774. + return;
  10775. +
  10776. + arvif->aid = 0;
  10777. +- memcpy(arvif->bssid, info->bssid, ETH_ALEN);
  10778. ++ ether_addr_copy(arvif->bssid, info->bssid);
  10779. +
  10780. + ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
  10781. + arvif->bssid);
  10782. + if (ret) {
  10783. +- ath10k_warn("failed to bring up vdev %d: %i\n",
  10784. ++ ath10k_warn(ar, "failed to bring up vdev %d: %i\n",
  10785. + arvif->vdev_id, ret);
  10786. + ath10k_vdev_stop(arvif);
  10787. + return;
  10788. +@@ -937,13 +1158,14 @@ static void ath10k_control_beaconing(str
  10789. + arvif->is_started = true;
  10790. + arvif->is_up = true;
  10791. +
  10792. +- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
  10793. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
  10794. + }
  10795. +
  10796. + static void ath10k_control_ibss(struct ath10k_vif *arvif,
  10797. + struct ieee80211_bss_conf *info,
  10798. + const u8 self_peer[ETH_ALEN])
  10799. + {
  10800. ++ struct ath10k *ar = arvif->ar;
  10801. + u32 vdev_param;
  10802. + int ret = 0;
  10803. +
  10804. +@@ -952,20 +1174,12 @@ static void ath10k_control_ibss(struct a
  10805. + if (!info->ibss_joined) {
  10806. + ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer);
  10807. + if (ret)
  10808. +- ath10k_warn("failed to delete IBSS self peer %pM for vdev %d: %d\n",
  10809. ++ ath10k_warn(ar, "failed to delete IBSS self peer %pM for vdev %d: %d\n",
  10810. + self_peer, arvif->vdev_id, ret);
  10811. +
  10812. + if (is_zero_ether_addr(arvif->bssid))
  10813. + return;
  10814. +
  10815. +- ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id,
  10816. +- arvif->bssid);
  10817. +- if (ret) {
  10818. +- ath10k_warn("failed to delete IBSS BSSID peer %pM for vdev %d: %d\n",
  10819. +- arvif->bssid, arvif->vdev_id, ret);
  10820. +- return;
  10821. +- }
  10822. +-
  10823. + memset(arvif->bssid, 0, ETH_ALEN);
  10824. +
  10825. + return;
  10826. +@@ -973,7 +1187,7 @@ static void ath10k_control_ibss(struct a
  10827. +
  10828. + ret = ath10k_peer_create(arvif->ar, arvif->vdev_id, self_peer);
  10829. + if (ret) {
  10830. +- ath10k_warn("failed to create IBSS self peer %pM for vdev %d: %d\n",
  10831. ++ ath10k_warn(ar, "failed to create IBSS self peer %pM for vdev %d: %d\n",
  10832. + self_peer, arvif->vdev_id, ret);
  10833. + return;
  10834. + }
  10835. +@@ -982,103 +1196,211 @@ static void ath10k_control_ibss(struct a
  10836. + ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
  10837. + ATH10K_DEFAULT_ATIM);
  10838. + if (ret)
  10839. +- ath10k_warn("failed to set IBSS ATIM for vdev %d: %d\n",
  10840. ++ ath10k_warn(ar, "failed to set IBSS ATIM for vdev %d: %d\n",
  10841. + arvif->vdev_id, ret);
  10842. + }
  10843. +
  10844. +-/*
  10845. +- * Review this when mac80211 gains per-interface powersave support.
  10846. +- */
  10847. +-static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
  10848. ++static int ath10k_mac_vif_recalc_ps_wake_threshold(struct ath10k_vif *arvif)
  10849. + {
  10850. + struct ath10k *ar = arvif->ar;
  10851. +- struct ieee80211_conf *conf = &ar->hw->conf;
  10852. +- enum wmi_sta_powersave_param param;
  10853. +- enum wmi_sta_ps_mode psmode;
  10854. ++ u32 param;
  10855. ++ u32 value;
  10856. + int ret;
  10857. +
  10858. + lockdep_assert_held(&arvif->ar->conf_mutex);
  10859. +
  10860. +- if (arvif->vif->type != NL80211_IFTYPE_STATION)
  10861. +- return 0;
  10862. +-
  10863. +- if (conf->flags & IEEE80211_CONF_PS) {
  10864. +- psmode = WMI_STA_PS_MODE_ENABLED;
  10865. +- param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
  10866. ++ if (arvif->u.sta.uapsd)
  10867. ++ value = WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER;
  10868. ++ else
  10869. ++ value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
  10870. +
  10871. +- ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
  10872. +- conf->dynamic_ps_timeout);
  10873. +- if (ret) {
  10874. +- ath10k_warn("failed to set inactivity time for vdev %d: %i\n",
  10875. +- arvif->vdev_id, ret);
  10876. +- return ret;
  10877. +- }
  10878. +- } else {
  10879. +- psmode = WMI_STA_PS_MODE_DISABLED;
  10880. ++ param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
  10881. ++ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value);
  10882. ++ if (ret) {
  10883. ++ ath10k_warn(ar, "failed to submit ps wake threshold %u on vdev %i: %d\n",
  10884. ++ value, arvif->vdev_id, ret);
  10885. ++ return ret;
  10886. + }
  10887. +
  10888. +- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
  10889. +- arvif->vdev_id, psmode ? "enable" : "disable");
  10890. ++ return 0;
  10891. ++}
  10892. +
  10893. +- ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
  10894. ++static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif)
  10895. ++{
  10896. ++ struct ath10k *ar = arvif->ar;
  10897. ++ u32 param;
  10898. ++ u32 value;
  10899. ++ int ret;
  10900. ++
  10901. ++ lockdep_assert_held(&arvif->ar->conf_mutex);
  10902. ++
  10903. ++ if (arvif->u.sta.uapsd)
  10904. ++ value = WMI_STA_PS_PSPOLL_COUNT_UAPSD;
  10905. ++ else
  10906. ++ value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
  10907. ++
  10908. ++ param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
  10909. ++ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
  10910. ++ param, value);
  10911. + if (ret) {
  10912. +- ath10k_warn("failed to set PS Mode %d for vdev %d: %d\n",
  10913. +- psmode, arvif->vdev_id, ret);
  10914. ++ ath10k_warn(ar, "failed to submit ps poll count %u on vdev %i: %d\n",
  10915. ++ value, arvif->vdev_id, ret);
  10916. + return ret;
  10917. + }
  10918. +
  10919. + return 0;
  10920. + }
  10921. +
  10922. +-/**********************/
  10923. +-/* Station management */
  10924. +-/**********************/
  10925. +-
  10926. +-static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
  10927. +- struct ath10k_vif *arvif,
  10928. +- struct ieee80211_sta *sta,
  10929. +- struct ieee80211_bss_conf *bss_conf,
  10930. +- struct wmi_peer_assoc_complete_arg *arg)
  10931. ++static int ath10k_mac_ps_vif_count(struct ath10k *ar)
  10932. + {
  10933. ++ struct ath10k_vif *arvif;
  10934. ++ int num = 0;
  10935. ++
  10936. + lockdep_assert_held(&ar->conf_mutex);
  10937. +
  10938. +- memcpy(arg->addr, sta->addr, ETH_ALEN);
  10939. +- arg->vdev_id = arvif->vdev_id;
  10940. +- arg->peer_aid = sta->aid;
  10941. +- arg->peer_flags |= WMI_PEER_AUTH;
  10942. ++ list_for_each_entry(arvif, &ar->arvifs, list)
  10943. ++ if (arvif->ps)
  10944. ++ num++;
  10945. +
  10946. +- if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
  10947. +- /*
  10948. +- * Seems FW have problems with Power Save in STA
  10949. +- * mode when we setup this parameter to high (eg. 5).
  10950. +- * Often we see that FW don't send NULL (with clean P flags)
  10951. +- * frame even there is info about buffered frames in beacons.
  10952. +- * Sometimes we have to wait more than 10 seconds before FW
  10953. +- * will wakeup. Often sending one ping from AP to our device
  10954. +- * just fail (more than 50%).
  10955. +- *
  10956. +- * Seems setting this FW parameter to 1 couse FW
  10957. +- * will check every beacon and will wakup immediately
  10958. +- * after detection buffered data.
  10959. +- */
  10960. +- arg->peer_listen_intval = 1;
  10961. +- else
  10962. +- arg->peer_listen_intval = ar->hw->conf.listen_interval;
  10963. ++ return num;
  10964. ++}
  10965. +
  10966. +- arg->peer_num_spatial_streams = 1;
  10967. ++static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
  10968. ++{
  10969. ++ struct ath10k *ar = arvif->ar;
  10970. ++ struct ieee80211_vif *vif = arvif->vif;
  10971. ++ struct ieee80211_conf *conf = &ar->hw->conf;
  10972. ++ enum wmi_sta_powersave_param param;
  10973. ++ enum wmi_sta_ps_mode psmode;
  10974. ++ int ret;
  10975. ++ int ps_timeout;
  10976. ++ bool enable_ps;
  10977. +
  10978. +- /*
  10979. +- * The assoc capabilities are available only in managed mode.
  10980. ++ lockdep_assert_held(&arvif->ar->conf_mutex);
  10981. ++
  10982. ++ if (arvif->vif->type != NL80211_IFTYPE_STATION)
  10983. ++ return 0;
  10984. ++
  10985. ++ enable_ps = arvif->ps;
  10986. ++
  10987. ++ if (enable_ps && ath10k_mac_ps_vif_count(ar) > 1 &&
  10988. ++ !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
  10989. ++ ar->fw_features)) {
  10990. ++ ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
  10991. ++ arvif->vdev_id);
  10992. ++ enable_ps = false;
  10993. ++ }
  10994. ++
  10995. ++ if (enable_ps) {
  10996. ++ psmode = WMI_STA_PS_MODE_ENABLED;
  10997. ++ param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
  10998. ++
  10999. ++ ps_timeout = conf->dynamic_ps_timeout;
  11000. ++ if (ps_timeout == 0) {
  11001. ++ /* Firmware doesn't like 0 */
  11002. ++ ps_timeout = ieee80211_tu_to_usec(
  11003. ++ vif->bss_conf.beacon_int) / 1000;
  11004. ++ }
  11005. ++
  11006. ++ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
  11007. ++ ps_timeout);
  11008. ++ if (ret) {
  11009. ++ ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n",
  11010. ++ arvif->vdev_id, ret);
  11011. ++ return ret;
  11012. ++ }
  11013. ++ } else {
  11014. ++ psmode = WMI_STA_PS_MODE_DISABLED;
  11015. ++ }
  11016. ++
  11017. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
  11018. ++ arvif->vdev_id, psmode ? "enable" : "disable");
  11019. ++
  11020. ++ ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
  11021. ++ if (ret) {
  11022. ++ ath10k_warn(ar, "failed to set PS Mode %d for vdev %d: %d\n",
  11023. ++ psmode, arvif->vdev_id, ret);
  11024. ++ return ret;
  11025. ++ }
  11026. ++
  11027. ++ return 0;
  11028. ++}
  11029. ++
  11030. ++static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif)
  11031. ++{
  11032. ++ struct ath10k *ar = arvif->ar;
  11033. ++ struct wmi_sta_keepalive_arg arg = {};
  11034. ++ int ret;
  11035. ++
  11036. ++ lockdep_assert_held(&arvif->ar->conf_mutex);
  11037. ++
  11038. ++ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
  11039. ++ return 0;
  11040. ++
  11041. ++ if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map))
  11042. ++ return 0;
  11043. ++
  11044. ++ /* Some firmware revisions have a bug and ignore the `enabled` field.
  11045. ++ * Instead use the interval to disable the keepalive.
  11046. ++ */
  11047. ++ arg.vdev_id = arvif->vdev_id;
  11048. ++ arg.enabled = 1;
  11049. ++ arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME;
  11050. ++ arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE;
  11051. ++
  11052. ++ ret = ath10k_wmi_sta_keepalive(ar, &arg);
  11053. ++ if (ret) {
  11054. ++ ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n",
  11055. ++ arvif->vdev_id, ret);
  11056. ++ return ret;
  11057. ++ }
  11058. ++
  11059. ++ return 0;
  11060. ++}
  11061. ++
  11062. ++/**********************/
  11063. ++/* Station management */
  11064. ++/**********************/
  11065. ++
  11066. ++static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar,
  11067. ++ struct ieee80211_vif *vif)
  11068. ++{
  11069. ++ /* Some firmware revisions have unstable STA powersave when listen
  11070. ++ * interval is set too high (e.g. 5). The symptoms are firmware doesn't
  11071. ++ * generate NullFunc frames properly even if buffered frames have been
  11072. ++ * indicated in Beacon TIM. Firmware would seldom wake up to pull
  11073. ++ * buffered frames. Often pinging the device from AP would simply fail.
  11074. ++ *
  11075. ++ * As a workaround set it to 1.
  11076. + */
  11077. +- if (arvif->vdev_type == WMI_VDEV_TYPE_STA && bss_conf)
  11078. +- arg->peer_caps = bss_conf->assoc_capability;
  11079. ++ if (vif->type == NL80211_IFTYPE_STATION)
  11080. ++ return 1;
  11081. ++
  11082. ++ return ar->hw->conf.listen_interval;
  11083. ++}
  11084. ++
  11085. ++static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
  11086. ++ struct ieee80211_vif *vif,
  11087. ++ struct ieee80211_sta *sta,
  11088. ++ struct wmi_peer_assoc_complete_arg *arg)
  11089. ++{
  11090. ++ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
  11091. ++
  11092. ++ lockdep_assert_held(&ar->conf_mutex);
  11093. ++
  11094. ++ ether_addr_copy(arg->addr, sta->addr);
  11095. ++ arg->vdev_id = arvif->vdev_id;
  11096. ++ arg->peer_aid = sta->aid;
  11097. ++ arg->peer_flags |= WMI_PEER_AUTH;
  11098. ++ arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif);
  11099. ++ arg->peer_num_spatial_streams = 1;
  11100. ++ arg->peer_caps = vif->bss_conf.assoc_capability;
  11101. + }
  11102. +
  11103. + static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
  11104. +- struct ath10k_vif *arvif,
  11105. ++ struct ieee80211_vif *vif,
  11106. + struct wmi_peer_assoc_complete_arg *arg)
  11107. + {
  11108. +- struct ieee80211_vif *vif = arvif->vif;
  11109. + struct ieee80211_bss_conf *info = &vif->bss_conf;
  11110. + struct cfg80211_bss *bss;
  11111. + const u8 *rsnie = NULL;
  11112. +@@ -1097,21 +1419,21 @@ static void ath10k_peer_assoc_h_crypto(s
  11113. + ies = rcu_dereference(bss->ies);
  11114. +
  11115. + wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
  11116. +- WLAN_OUI_TYPE_MICROSOFT_WPA,
  11117. +- ies->data,
  11118. +- ies->len);
  11119. ++ WLAN_OUI_TYPE_MICROSOFT_WPA,
  11120. ++ ies->data,
  11121. ++ ies->len);
  11122. + rcu_read_unlock();
  11123. + cfg80211_put_bss(ar->hw->wiphy, bss);
  11124. + }
  11125. +
  11126. + /* FIXME: base on RSN IE/WPA IE is a correct idea? */
  11127. + if (rsnie || wpaie) {
  11128. +- ath10k_dbg(ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__);
  11129. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__);
  11130. + arg->peer_flags |= WMI_PEER_NEED_PTK_4_WAY;
  11131. + }
  11132. +
  11133. + if (wpaie) {
  11134. +- ath10k_dbg(ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__);
  11135. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__);
  11136. + arg->peer_flags |= WMI_PEER_NEED_GTK_2_WAY;
  11137. + }
  11138. + }
  11139. +@@ -1149,6 +1471,7 @@ static void ath10k_peer_assoc_h_ht(struc
  11140. + {
  11141. + const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
  11142. + int i, n;
  11143. ++ u32 stbc;
  11144. +
  11145. + lockdep_assert_held(&ar->conf_mutex);
  11146. +
  11147. +@@ -1185,7 +1508,6 @@ static void ath10k_peer_assoc_h_ht(struc
  11148. + }
  11149. +
  11150. + if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
  11151. +- u32 stbc;
  11152. + stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC;
  11153. + stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
  11154. + stbc = stbc << WMI_RC_RX_STBC_FLAG_S;
  11155. +@@ -1220,7 +1542,7 @@ static void ath10k_peer_assoc_h_ht(struc
  11156. + arg->peer_num_spatial_streams = sta->rx_nss;
  11157. + }
  11158. +
  11159. +- ath10k_dbg(ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
  11160. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
  11161. + arg->addr,
  11162. + arg->peer_ht_rates.num_rates,
  11163. + arg->peer_num_spatial_streams);
  11164. +@@ -1237,7 +1559,7 @@ static int ath10k_peer_assoc_qos_ap(stru
  11165. + lockdep_assert_held(&ar->conf_mutex);
  11166. +
  11167. + if (sta->wme && sta->uapsd_queues) {
  11168. +- ath10k_dbg(ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
  11169. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
  11170. + sta->uapsd_queues, sta->max_sp);
  11171. +
  11172. + if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
  11173. +@@ -1253,7 +1575,6 @@ static int ath10k_peer_assoc_qos_ap(stru
  11174. + uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
  11175. + WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;
  11176. +
  11177. +-
  11178. + if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
  11179. + max_sp = sta->max_sp;
  11180. +
  11181. +@@ -1262,7 +1583,7 @@ static int ath10k_peer_assoc_qos_ap(stru
  11182. + WMI_AP_PS_PEER_PARAM_UAPSD,
  11183. + uapsd);
  11184. + if (ret) {
  11185. +- ath10k_warn("failed to set ap ps peer param uapsd for vdev %i: %d\n",
  11186. ++ ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n",
  11187. + arvif->vdev_id, ret);
  11188. + return ret;
  11189. + }
  11190. +@@ -1272,7 +1593,7 @@ static int ath10k_peer_assoc_qos_ap(stru
  11191. + WMI_AP_PS_PEER_PARAM_MAX_SP,
  11192. + max_sp);
  11193. + if (ret) {
  11194. +- ath10k_warn("failed to set ap ps peer param max sp for vdev %i: %d\n",
  11195. ++ ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n",
  11196. + arvif->vdev_id, ret);
  11197. + return ret;
  11198. + }
  11199. +@@ -1282,9 +1603,10 @@ static int ath10k_peer_assoc_qos_ap(stru
  11200. + sta->listen_interval - mac80211 patch required.
  11201. + Currently use 10 seconds */
  11202. + ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr,
  11203. +- WMI_AP_PS_PEER_PARAM_AGEOUT_TIME, 10);
  11204. ++ WMI_AP_PS_PEER_PARAM_AGEOUT_TIME,
  11205. ++ 10);
  11206. + if (ret) {
  11207. +- ath10k_warn("failed to set ap ps peer param ageout time for vdev %i: %d\n",
  11208. ++ ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n",
  11209. + arvif->vdev_id, ret);
  11210. + return ret;
  11211. + }
  11212. +@@ -1304,8 +1626,11 @@ static void ath10k_peer_assoc_h_vht(stru
  11213. + return;
  11214. +
  11215. + arg->peer_flags |= WMI_PEER_VHT;
  11216. +- arg->peer_vht_caps = vht_cap->cap;
  11217. +
  11218. ++ if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
  11219. ++ arg->peer_flags |= WMI_PEER_VHT_2G;
  11220. ++
  11221. ++ arg->peer_vht_caps = vht_cap->cap;
  11222. +
  11223. + ampdu_factor = (vht_cap->cap &
  11224. + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
  11225. +@@ -1331,16 +1656,17 @@ static void ath10k_peer_assoc_h_vht(stru
  11226. + arg->peer_vht_rates.tx_mcs_set =
  11227. + __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
  11228. +
  11229. +- ath10k_dbg(ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
  11230. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
  11231. + sta->addr, arg->peer_max_mpdu, arg->peer_flags);
  11232. + }
  11233. +
  11234. + static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
  11235. +- struct ath10k_vif *arvif,
  11236. ++ struct ieee80211_vif *vif,
  11237. + struct ieee80211_sta *sta,
  11238. +- struct ieee80211_bss_conf *bss_conf,
  11239. + struct wmi_peer_assoc_complete_arg *arg)
  11240. + {
  11241. ++ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
  11242. ++
  11243. + switch (arvif->vdev_type) {
  11244. + case WMI_VDEV_TYPE_AP:
  11245. + if (sta->wme)
  11246. +@@ -1352,16 +1678,29 @@ static void ath10k_peer_assoc_h_qos(stru
  11247. + }
  11248. + break;
  11249. + case WMI_VDEV_TYPE_STA:
  11250. +- if (bss_conf->qos)
  11251. ++ if (vif->bss_conf.qos)
  11252. ++ arg->peer_flags |= WMI_PEER_QOS;
  11253. ++ break;
  11254. ++ case WMI_VDEV_TYPE_IBSS:
  11255. ++ if (sta->wme)
  11256. + arg->peer_flags |= WMI_PEER_QOS;
  11257. + break;
  11258. + default:
  11259. + break;
  11260. + }
  11261. ++
  11262. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n",
  11263. ++ sta->addr, !!(arg->peer_flags & WMI_PEER_QOS));
  11264. ++}
  11265. ++
  11266. ++static bool ath10k_mac_sta_has_11g_rates(struct ieee80211_sta *sta)
  11267. ++{
  11268. ++ /* First 4 rates in ath10k_rates are CCK (11b) rates. */
  11269. ++ return sta->supp_rates[IEEE80211_BAND_2GHZ] >> 4;
  11270. + }
  11271. +
  11272. + static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
  11273. +- struct ath10k_vif *arvif,
  11274. ++ struct ieee80211_vif *vif,
  11275. + struct ieee80211_sta *sta,
  11276. + struct wmi_peer_assoc_complete_arg *arg)
  11277. + {
  11278. +@@ -1369,13 +1708,20 @@ static void ath10k_peer_assoc_h_phymode(
  11279. +
  11280. + switch (ar->hw->conf.chandef.chan->band) {
  11281. + case IEEE80211_BAND_2GHZ:
  11282. +- if (sta->ht_cap.ht_supported) {
  11283. ++ if (sta->vht_cap.vht_supported) {
  11284. ++ if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
  11285. ++ phymode = MODE_11AC_VHT40;
  11286. ++ else
  11287. ++ phymode = MODE_11AC_VHT20;
  11288. ++ } else if (sta->ht_cap.ht_supported) {
  11289. + if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
  11290. + phymode = MODE_11NG_HT40;
  11291. + else
  11292. + phymode = MODE_11NG_HT20;
  11293. +- } else {
  11294. ++ } else if (ath10k_mac_sta_has_11g_rates(sta)) {
  11295. + phymode = MODE_11G;
  11296. ++ } else {
  11297. ++ phymode = MODE_11B;
  11298. + }
  11299. +
  11300. + break;
  11301. +@@ -1404,7 +1750,7 @@ static void ath10k_peer_assoc_h_phymode(
  11302. + break;
  11303. + }
  11304. +
  11305. +- ath10k_dbg(ATH10K_DBG_MAC, "mac peer %pM phymode %s\n",
  11306. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n",
  11307. + sta->addr, ath10k_wmi_phymode_str(phymode));
  11308. +
  11309. + arg->peer_phymode = phymode;
  11310. +@@ -1412,22 +1758,21 @@ static void ath10k_peer_assoc_h_phymode(
  11311. + }
  11312. +
  11313. + static int ath10k_peer_assoc_prepare(struct ath10k *ar,
  11314. +- struct ath10k_vif *arvif,
  11315. ++ struct ieee80211_vif *vif,
  11316. + struct ieee80211_sta *sta,
  11317. +- struct ieee80211_bss_conf *bss_conf,
  11318. + struct wmi_peer_assoc_complete_arg *arg)
  11319. + {
  11320. + lockdep_assert_held(&ar->conf_mutex);
  11321. +
  11322. + memset(arg, 0, sizeof(*arg));
  11323. +
  11324. +- ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, arg);
  11325. +- ath10k_peer_assoc_h_crypto(ar, arvif, arg);
  11326. ++ ath10k_peer_assoc_h_basic(ar, vif, sta, arg);
  11327. ++ ath10k_peer_assoc_h_crypto(ar, vif, arg);
  11328. + ath10k_peer_assoc_h_rates(ar, sta, arg);
  11329. + ath10k_peer_assoc_h_ht(ar, sta, arg);
  11330. + ath10k_peer_assoc_h_vht(ar, sta, arg);
  11331. +- ath10k_peer_assoc_h_qos(ar, arvif, sta, bss_conf, arg);
  11332. +- ath10k_peer_assoc_h_phymode(ar, arvif, sta, arg);
  11333. ++ ath10k_peer_assoc_h_qos(ar, vif, sta, arg);
  11334. ++ ath10k_peer_assoc_h_phymode(ar, vif, sta, arg);
  11335. +
  11336. + return 0;
  11337. + }
  11338. +@@ -1459,6 +1804,68 @@ static int ath10k_setup_peer_smps(struct
  11339. + ath10k_smps_map[smps]);
  11340. + }
  11341. +
  11342. ++static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar,
  11343. ++ struct ieee80211_vif *vif,
  11344. ++ struct ieee80211_sta_vht_cap vht_cap)
  11345. ++{
  11346. ++ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
  11347. ++ int ret;
  11348. ++ u32 param;
  11349. ++ u32 value;
  11350. ++
  11351. ++ if (!(ar->vht_cap_info &
  11352. ++ (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
  11353. ++ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
  11354. ++ IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
  11355. ++ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)))
  11356. ++ return 0;
  11357. ++
  11358. ++ param = ar->wmi.vdev_param->txbf;
  11359. ++ value = 0;
  11360. ++
  11361. ++ if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED))
  11362. ++ return 0;
  11363. ++
  11364. ++ /* The following logic is correct. If a remote STA advertises support
  11365. ++ * for being a beamformer then we should enable us being a beamformee.
  11366. ++ */
  11367. ++
  11368. ++ if (ar->vht_cap_info &
  11369. ++ (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
  11370. ++ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
  11371. ++ if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
  11372. ++ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
  11373. ++
  11374. ++ if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
  11375. ++ value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
  11376. ++ }
  11377. ++
  11378. ++ if (ar->vht_cap_info &
  11379. ++ (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
  11380. ++ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
  11381. ++ if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
  11382. ++ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
  11383. ++
  11384. ++ if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
  11385. ++ value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
  11386. ++ }
  11387. ++
  11388. ++ if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE)
  11389. ++ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
  11390. ++
  11391. ++ if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER)
  11392. ++ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
  11393. ++
  11394. ++ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value);
  11395. ++ if (ret) {
  11396. ++ ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n",
  11397. ++ value, ret);
  11398. ++ return ret;
  11399. ++ }
  11400. ++
  11401. ++ return 0;
  11402. ++}
  11403. ++
  11404. + /* can be called only in mac80211 callbacks due to `key_count` usage */
  11405. + static void ath10k_bss_assoc(struct ieee80211_hw *hw,
  11406. + struct ieee80211_vif *vif,
  11407. +@@ -1467,17 +1874,21 @@ static void ath10k_bss_assoc(struct ieee
  11408. + struct ath10k *ar = hw->priv;
  11409. + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
  11410. + struct ieee80211_sta_ht_cap ht_cap;
  11411. ++ struct ieee80211_sta_vht_cap vht_cap;
  11412. + struct wmi_peer_assoc_complete_arg peer_arg;
  11413. + struct ieee80211_sta *ap_sta;
  11414. + int ret;
  11415. +
  11416. + lockdep_assert_held(&ar->conf_mutex);
  11417. +
  11418. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
  11419. ++ arvif->vdev_id, arvif->bssid, arvif->aid);
  11420. ++
  11421. + rcu_read_lock();
  11422. +
  11423. + ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
  11424. + if (!ap_sta) {
  11425. +- ath10k_warn("failed to find station entry for bss %pM vdev %i\n",
  11426. ++ ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n",
  11427. + bss_conf->bssid, arvif->vdev_id);
  11428. + rcu_read_unlock();
  11429. + return;
  11430. +@@ -1486,11 +1897,11 @@ static void ath10k_bss_assoc(struct ieee
  11431. + /* ap_sta must be accessed only within rcu section which must be left
  11432. + * before calling ath10k_setup_peer_smps() which might sleep. */
  11433. + ht_cap = ap_sta->ht_cap;
  11434. ++ vht_cap = ap_sta->vht_cap;
  11435. +
  11436. +- ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta,
  11437. +- bss_conf, &peer_arg);
  11438. ++ ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg);
  11439. + if (ret) {
  11440. +- ath10k_warn("failed to prepare peer assoc for %pM vdev %i: %d\n",
  11441. ++ ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n",
  11442. + bss_conf->bssid, arvif->vdev_id, ret);
  11443. + rcu_read_unlock();
  11444. + return;
  11445. +@@ -1500,88 +1911,100 @@ static void ath10k_bss_assoc(struct ieee
  11446. +
  11447. + ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
  11448. + if (ret) {
  11449. +- ath10k_warn("failed to run peer assoc for %pM vdev %i: %d\n",
  11450. ++ ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n",
  11451. + bss_conf->bssid, arvif->vdev_id, ret);
  11452. + return;
  11453. + }
  11454. +
  11455. + ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap);
  11456. + if (ret) {
  11457. +- ath10k_warn("failed to setup peer SMPS for vdev %i: %d\n",
  11458. ++ ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n",
  11459. + arvif->vdev_id, ret);
  11460. + return;
  11461. + }
  11462. +
  11463. +- ath10k_dbg(ATH10K_DBG_MAC,
  11464. ++ ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
  11465. ++ if (ret) {
  11466. ++ ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n",
  11467. ++ arvif->vdev_id, bss_conf->bssid, ret);
  11468. ++ return;
  11469. ++ }
  11470. ++
  11471. ++ ath10k_dbg(ar, ATH10K_DBG_MAC,
  11472. + "mac vdev %d up (associated) bssid %pM aid %d\n",
  11473. + arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
  11474. +
  11475. ++ WARN_ON(arvif->is_up);
  11476. ++
  11477. + arvif->aid = bss_conf->aid;
  11478. +- memcpy(arvif->bssid, bss_conf->bssid, ETH_ALEN);
  11479. ++ ether_addr_copy(arvif->bssid, bss_conf->bssid);
  11480. +
  11481. + ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
  11482. + if (ret) {
  11483. +- ath10k_warn("failed to set vdev %d up: %d\n",
  11484. ++ ath10k_warn(ar, "failed to set vdev %d up: %d\n",
  11485. + arvif->vdev_id, ret);
  11486. + return;
  11487. + }
  11488. +
  11489. + arvif->is_up = true;
  11490. ++
  11491. ++ /* Workaround: Some firmware revisions (tested with qca6174
  11492. ++ * WLAN.RM.2.0-00073) have buggy powersave state machine and must be
  11493. ++ * poked with peer param command.
  11494. ++ */
  11495. ++ ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid,
  11496. ++ WMI_PEER_DUMMY_VAR, 1);
  11497. ++ if (ret) {
  11498. ++ ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n",
  11499. ++ arvif->bssid, arvif->vdev_id, ret);
  11500. ++ return;
  11501. ++ }
  11502. + }
  11503. +
  11504. +-/*
  11505. +- * FIXME: flush TIDs
  11506. +- */
  11507. + static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
  11508. + struct ieee80211_vif *vif)
  11509. + {
  11510. + struct ath10k *ar = hw->priv;
  11511. + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
  11512. ++ struct ieee80211_sta_vht_cap vht_cap = {};
  11513. + int ret;
  11514. +
  11515. + lockdep_assert_held(&ar->conf_mutex);
  11516. +
  11517. +- /*
  11518. +- * For some reason, calling VDEV-DOWN before VDEV-STOP
  11519. +- * makes the FW to send frames via HTT after disassociation.
  11520. +- * No idea why this happens, even though VDEV-DOWN is supposed
  11521. +- * to be analogous to link down, so just stop the VDEV.
  11522. +- */
  11523. +- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d stop (disassociated\n",
  11524. +- arvif->vdev_id);
  11525. +-
  11526. +- /* FIXME: check return value */
  11527. +- ret = ath10k_vdev_stop(arvif);
  11528. +-
  11529. +- /*
  11530. +- * If we don't call VDEV-DOWN after VDEV-STOP FW will remain active and
  11531. +- * report beacons from previously associated network through HTT.
  11532. +- * This in turn would spam mac80211 WARN_ON if we bring down all
  11533. +- * interfaces as it expects there is no rx when no interface is
  11534. +- * running.
  11535. +- */
  11536. +- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d down\n", arvif->vdev_id);
  11537. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n",
  11538. ++ arvif->vdev_id, arvif->bssid);
  11539. +
  11540. +- /* FIXME: why don't we print error if wmi call fails? */
  11541. + ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
  11542. ++ if (ret)
  11543. ++ ath10k_warn(ar, "failed to down vdev %i: %d\n",
  11544. ++ arvif->vdev_id, ret);
  11545. +
  11546. +- arvif->def_wep_key_idx = 0;
  11547. ++ arvif->def_wep_key_idx = -1;
  11548. ++
  11549. ++ ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
  11550. ++ if (ret) {
  11551. ++ ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n",
  11552. ++ arvif->vdev_id, ret);
  11553. ++ return;
  11554. ++ }
  11555. +
  11556. +- arvif->is_started = false;
  11557. + arvif->is_up = false;
  11558. + }
  11559. +
  11560. +-static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
  11561. +- struct ieee80211_sta *sta, bool reassoc)
  11562. ++static int ath10k_station_assoc(struct ath10k *ar,
  11563. ++ struct ieee80211_vif *vif,
  11564. ++ struct ieee80211_sta *sta,
  11565. ++ bool reassoc)
  11566. + {
  11567. ++ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
  11568. + struct wmi_peer_assoc_complete_arg peer_arg;
  11569. + int ret = 0;
  11570. +
  11571. + lockdep_assert_held(&ar->conf_mutex);
  11572. +
  11573. +- ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg);
  11574. ++ ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg);
  11575. + if (ret) {
  11576. +- ath10k_warn("failed to prepare WMI peer assoc for %pM vdev %i: %i\n",
  11577. ++ ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n",
  11578. + sta->addr, arvif->vdev_id, ret);
  11579. + return ret;
  11580. + }
  11581. +@@ -1589,48 +2012,59 @@ static int ath10k_station_assoc(struct a
  11582. + peer_arg.peer_reassoc = reassoc;
  11583. + ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
  11584. + if (ret) {
  11585. +- ath10k_warn("failed to run peer assoc for STA %pM vdev %i: %d\n",
  11586. ++ ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n",
  11587. + sta->addr, arvif->vdev_id, ret);
  11588. + return ret;
  11589. + }
  11590. +
  11591. +- ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, &sta->ht_cap);
  11592. +- if (ret) {
  11593. +- ath10k_warn("failed to setup peer SMPS for vdev %d: %d\n",
  11594. +- arvif->vdev_id, ret);
  11595. +- return ret;
  11596. +- }
  11597. +-
  11598. +- if (!sta->wme) {
  11599. +- arvif->num_legacy_stations++;
  11600. +- ret = ath10k_recalc_rtscts_prot(arvif);
  11601. ++ /* Re-assoc is run only to update supported rates for given station. It
  11602. ++ * doesn't make much sense to reconfigure the peer completely.
  11603. ++ */
  11604. ++ if (!reassoc) {
  11605. ++ ret = ath10k_setup_peer_smps(ar, arvif, sta->addr,
  11606. ++ &sta->ht_cap);
  11607. + if (ret) {
  11608. +- ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n",
  11609. ++ ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n",
  11610. + arvif->vdev_id, ret);
  11611. + return ret;
  11612. + }
  11613. +- }
  11614. +
  11615. +- ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
  11616. +- if (ret) {
  11617. +- ath10k_warn("failed to install peer wep keys for vdev %i: %d\n",
  11618. +- arvif->vdev_id, ret);
  11619. +- return ret;
  11620. +- }
  11621. ++ ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
  11622. ++ if (ret) {
  11623. ++ ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n",
  11624. ++ sta->addr, arvif->vdev_id, ret);
  11625. ++ return ret;
  11626. ++ }
  11627. +
  11628. +- ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
  11629. +- if (ret) {
  11630. +- ath10k_warn("failed to set qos params for STA %pM for vdev %i: %d\n",
  11631. +- sta->addr, arvif->vdev_id, ret);
  11632. +- return ret;
  11633. ++ if (!sta->wme) {
  11634. ++ arvif->num_legacy_stations++;
  11635. ++ ret = ath10k_recalc_rtscts_prot(arvif);
  11636. ++ if (ret) {
  11637. ++ ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
  11638. ++ arvif->vdev_id, ret);
  11639. ++ return ret;
  11640. ++ }
  11641. ++ }
  11642. ++
  11643. ++ /* Plumb cached keys only for static WEP */
  11644. ++ if (arvif->def_wep_key_idx != -1) {
  11645. ++ ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
  11646. ++ if (ret) {
  11647. ++ ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n",
  11648. ++ arvif->vdev_id, ret);
  11649. ++ return ret;
  11650. ++ }
  11651. ++ }
  11652. + }
  11653. +
  11654. + return ret;
  11655. + }
  11656. +
  11657. +-static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif,
  11658. ++static int ath10k_station_disassoc(struct ath10k *ar,
  11659. ++ struct ieee80211_vif *vif,
  11660. + struct ieee80211_sta *sta)
  11661. + {
  11662. ++ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
  11663. + int ret = 0;
  11664. +
  11665. + lockdep_assert_held(&ar->conf_mutex);
  11666. +@@ -1639,7 +2073,7 @@ static int ath10k_station_disassoc(struc
  11667. + arvif->num_legacy_stations--;
  11668. + ret = ath10k_recalc_rtscts_prot(arvif);
  11669. + if (ret) {
  11670. +- ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n",
  11671. ++ ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
  11672. + arvif->vdev_id, ret);
  11673. + return ret;
  11674. + }
  11675. +@@ -1647,7 +2081,7 @@ static int ath10k_station_disassoc(struc
  11676. +
  11677. + ret = ath10k_clear_peer_keys(arvif, sta->addr);
  11678. + if (ret) {
  11679. +- ath10k_warn("failed to clear all peer wep keys for vdev %i: %d\n",
  11680. ++ ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n",
  11681. + arvif->vdev_id, ret);
  11682. + return ret;
  11683. + }
  11684. +@@ -1722,6 +2156,7 @@ static int ath10k_update_channel_list(st
  11685. + ch->passive = passive;
  11686. +
  11687. + ch->freq = channel->center_freq;
  11688. ++ ch->band_center_freq1 = channel->center_freq;
  11689. + ch->min_power = 0;
  11690. + ch->max_power = channel->max_power * 2;
  11691. + ch->max_reg_power = channel->max_reg_power * 2;
  11692. +@@ -1739,7 +2174,7 @@ static int ath10k_update_channel_list(st
  11693. + if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN))
  11694. + continue;
  11695. +
  11696. +- ath10k_dbg(ATH10K_DBG_WMI,
  11697. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  11698. + "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
  11699. + ch - arg.channels, arg.n_channels,
  11700. + ch->freq, ch->max_power, ch->max_reg_power,
  11701. +@@ -1782,7 +2217,7 @@ static void ath10k_regd_update(struct at
  11702. +
  11703. + ret = ath10k_update_channel_list(ar);
  11704. + if (ret)
  11705. +- ath10k_warn("failed to update channel list: %d\n", ret);
  11706. ++ ath10k_warn(ar, "failed to update channel list: %d\n", ret);
  11707. +
  11708. + regpair = ar->ath_common.regulatory.regpair;
  11709. +
  11710. +@@ -1803,7 +2238,7 @@ static void ath10k_regd_update(struct at
  11711. + regpair->reg_5ghz_ctl,
  11712. + wmi_dfs_reg);
  11713. + if (ret)
  11714. +- ath10k_warn("failed to set pdev regdomain: %d\n", ret);
  11715. ++ ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret);
  11716. + }
  11717. +
  11718. + static void ath10k_reg_notifier(struct wiphy *wiphy,
  11719. +@@ -1816,12 +2251,12 @@ static void ath10k_reg_notifier(struct w
  11720. + ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
  11721. +
  11722. + if (config_enabled(CPTCFG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
  11723. +- ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs region 0x%x\n",
  11724. ++ ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n",
  11725. + request->dfs_region);
  11726. + result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
  11727. + request->dfs_region);
  11728. + if (!result)
  11729. +- ath10k_warn("DFS region 0x%X not supported, will trigger radar for every pulse\n",
  11730. ++ ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every pulse\n",
  11731. + request->dfs_region);
  11732. + }
  11733. +
  11734. +@@ -1849,28 +2284,25 @@ static u8 ath10k_tx_h_get_tid(struct iee
  11735. + return ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
  11736. + }
  11737. +
  11738. +-static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar,
  11739. +- struct ieee80211_tx_info *info)
  11740. ++static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar, struct ieee80211_vif *vif)
  11741. + {
  11742. +- if (info->control.vif)
  11743. +- return ath10k_vif_to_arvif(info->control.vif)->vdev_id;
  11744. ++ if (vif)
  11745. ++ return ath10k_vif_to_arvif(vif)->vdev_id;
  11746. +
  11747. + if (ar->monitor_started)
  11748. + return ar->monitor_vdev_id;
  11749. +
  11750. +- ath10k_warn("failed to resolve vdev id\n");
  11751. ++ ath10k_warn(ar, "failed to resolve vdev id\n");
  11752. + return 0;
  11753. + }
  11754. +
  11755. +-/*
  11756. +- * Frames sent to the FW have to be in "Native Wifi" format.
  11757. +- * Strip the QoS field from the 802.11 header.
  11758. ++/* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
  11759. ++ * Control in the header.
  11760. + */
  11761. +-static void ath10k_tx_h_qos_workaround(struct ieee80211_hw *hw,
  11762. +- struct ieee80211_tx_control *control,
  11763. +- struct sk_buff *skb)
  11764. ++static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb)
  11765. + {
  11766. + struct ieee80211_hdr *hdr = (void *)skb->data;
  11767. ++ struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
  11768. + u8 *qos_ctl;
  11769. +
  11770. + if (!ieee80211_is_data_qos(hdr->frame_control))
  11771. +@@ -1880,68 +2312,24 @@ static void ath10k_tx_h_qos_workaround(s
  11772. + memmove(skb->data + IEEE80211_QOS_CTL_LEN,
  11773. + skb->data, (void *)qos_ctl - (void *)skb->data);
  11774. + skb_pull(skb, IEEE80211_QOS_CTL_LEN);
  11775. +-}
  11776. +-
  11777. +-static void ath10k_tx_wep_key_work(struct work_struct *work)
  11778. +-{
  11779. +- struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
  11780. +- wep_key_work);
  11781. +- int ret, keyidx = arvif->def_wep_key_newidx;
  11782. +-
  11783. +- if (arvif->def_wep_key_idx == keyidx)
  11784. +- return;
  11785. +-
  11786. +- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
  11787. +- arvif->vdev_id, keyidx);
  11788. +
  11789. +- ret = ath10k_wmi_vdev_set_param(arvif->ar,
  11790. +- arvif->vdev_id,
  11791. +- arvif->ar->wmi.vdev_param->def_keyid,
  11792. +- keyidx);
  11793. +- if (ret) {
  11794. +- ath10k_warn("failed to update wep key index for vdev %d: %d\n",
  11795. +- arvif->vdev_id,
  11796. +- ret);
  11797. +- return;
  11798. ++ /* Fw/Hw generates a corrupted QoS Control Field for QoS NullFunc
  11799. ++ * frames. Powersave is handled by the fw/hw so QoS NullFunc frames are
  11800. ++ * used only for CQM purposes (e.g. hostapd station keepalive ping) so
  11801. ++ * it is safe to downgrade to NullFunc.
  11802. ++ */
  11803. ++ hdr = (void *)skb->data;
  11804. ++ if (ieee80211_is_qos_nullfunc(hdr->frame_control)) {
  11805. ++ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
  11806. ++ cb->htt.tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
  11807. + }
  11808. +-
  11809. +- arvif->def_wep_key_idx = keyidx;
  11810. + }
  11811. +
  11812. +-static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
  11813. +-{
  11814. +- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
  11815. +- struct ieee80211_vif *vif = info->control.vif;
  11816. +- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
  11817. +- struct ath10k *ar = arvif->ar;
  11818. +- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
  11819. +- struct ieee80211_key_conf *key = info->control.hw_key;
  11820. +-
  11821. +- if (!ieee80211_has_protected(hdr->frame_control))
  11822. +- return;
  11823. +-
  11824. +- if (!key)
  11825. +- return;
  11826. +-
  11827. +- if (key->cipher != WLAN_CIPHER_SUITE_WEP40 &&
  11828. +- key->cipher != WLAN_CIPHER_SUITE_WEP104)
  11829. +- return;
  11830. +-
  11831. +- if (key->keyidx == arvif->def_wep_key_idx)
  11832. +- return;
  11833. +-
  11834. +- /* FIXME: Most likely a few frames will be TXed with an old key. Simply
  11835. +- * queueing frames until key index is updated is not an option because
  11836. +- * sk_buff may need more processing to be done, e.g. offchannel */
  11837. +- arvif->def_wep_key_newidx = key->keyidx;
  11838. +- ieee80211_queue_work(ar->hw, &arvif->wep_key_work);
  11839. +-}
  11840. +-
  11841. +-static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, struct sk_buff *skb)
  11842. ++static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
  11843. ++ struct ieee80211_vif *vif,
  11844. ++ struct sk_buff *skb)
  11845. + {
  11846. + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
  11847. +- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
  11848. +- struct ieee80211_vif *vif = info->control.vif;
  11849. + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
  11850. +
  11851. + /* This is case only for P2P_GO */
  11852. +@@ -1961,6 +2349,18 @@ static void ath10k_tx_h_add_p2p_noa_ie(s
  11853. + }
  11854. + }
  11855. +
  11856. ++static bool ath10k_mac_need_offchan_tx_work(struct ath10k *ar)
  11857. ++{
  11858. ++ /* FIXME: Not really sure since when the behaviour changed. At some
  11859. ++ * point new firmware stopped requiring creation of peer entries for
  11860. ++ * offchannel tx (and actually creating them causes issues with wmi-htc
  11861. ++ * tx credit replenishment and reliability). Assuming it's at least 3.4
  11862. ++ * because that's when the `freq` was introduced to TX_FRM HTT command.
  11863. ++ */
  11864. ++ return !(ar->htt.target_version_major >= 3 &&
  11865. ++ ar->htt.target_version_minor >= 4);
  11866. ++}
  11867. ++
  11868. + static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
  11869. + {
  11870. + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
  11871. +@@ -1977,7 +2377,7 @@ static void ath10k_tx_htt(struct ath10k
  11872. + ar->fw_features)) {
  11873. + if (skb_queue_len(&ar->wmi_mgmt_tx_queue) >=
  11874. + ATH10K_MAX_NUM_MGMT_PENDING) {
  11875. +- ath10k_warn("reached WMI management tranmist queue limit\n");
  11876. ++ ath10k_warn(ar, "reached WMI management transmit queue limit\n");
  11877. + ret = -EBUSY;
  11878. + goto exit;
  11879. + }
  11880. +@@ -2001,7 +2401,8 @@ static void ath10k_tx_htt(struct ath10k
  11881. +
  11882. + exit:
  11883. + if (ret) {
  11884. +- ath10k_warn("failed to transmit packet, dropping: %d\n", ret);
  11885. ++ ath10k_warn(ar, "failed to transmit packet, dropping: %d\n",
  11886. ++ ret);
  11887. + ieee80211_free_txskb(ar->hw, skb);
  11888. + }
  11889. + }
  11890. +@@ -2043,7 +2444,7 @@ void ath10k_offchan_tx_work(struct work_
  11891. +
  11892. + mutex_lock(&ar->conf_mutex);
  11893. +
  11894. +- ath10k_dbg(ATH10K_DBG_MAC, "mac offchannel skb %p\n",
  11895. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %p\n",
  11896. + skb);
  11897. +
  11898. + hdr = (struct ieee80211_hdr *)skb->data;
  11899. +@@ -2056,13 +2457,13 @@ void ath10k_offchan_tx_work(struct work_
  11900. +
  11901. + if (peer)
  11902. + /* FIXME: should this use ath10k_warn()? */
  11903. +- ath10k_dbg(ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
  11904. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
  11905. + peer_addr, vdev_id);
  11906. +
  11907. + if (!peer) {
  11908. + ret = ath10k_peer_create(ar, vdev_id, peer_addr);
  11909. + if (ret)
  11910. +- ath10k_warn("failed to create peer %pM on vdev %d: %d\n",
  11911. ++ ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
  11912. + peer_addr, vdev_id, ret);
  11913. + }
  11914. +
  11915. +@@ -2075,14 +2476,14 @@ void ath10k_offchan_tx_work(struct work_
  11916. +
  11917. + ret = wait_for_completion_timeout(&ar->offchan_tx_completed,
  11918. + 3 * HZ);
  11919. +- if (ret <= 0)
  11920. +- ath10k_warn("timed out waiting for offchannel skb %p\n",
  11921. ++ if (ret == 0)
  11922. ++ ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
  11923. + skb);
  11924. +
  11925. + if (!peer) {
  11926. + ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
  11927. + if (ret)
  11928. +- ath10k_warn("failed to delete peer %pM on vdev %d: %d\n",
  11929. ++ ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n",
  11930. + peer_addr, vdev_id, ret);
  11931. + }
  11932. +
  11933. +@@ -2116,7 +2517,7 @@ void ath10k_mgmt_over_wmi_tx_work(struct
  11934. +
  11935. + ret = ath10k_wmi_mgmt_tx(ar, skb);
  11936. + if (ret) {
  11937. +- ath10k_warn("failed to transmit management frame via WMI: %d\n",
  11938. ++ ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n",
  11939. + ret);
  11940. + ieee80211_free_txskb(ar->hw, skb);
  11941. + }
  11942. +@@ -2127,34 +2528,41 @@ void ath10k_mgmt_over_wmi_tx_work(struct
  11943. + /* Scanning */
  11944. + /************/
  11945. +
  11946. +-/*
  11947. +- * This gets called if we dont get a heart-beat during scan.
  11948. +- * This may indicate the FW has hung and we need to abort the
  11949. +- * scan manually to prevent cancel_hw_scan() from deadlocking
  11950. +- */
  11951. +-void ath10k_reset_scan(unsigned long ptr)
  11952. ++void __ath10k_scan_finish(struct ath10k *ar)
  11953. + {
  11954. +- struct ath10k *ar = (struct ath10k *)ptr;
  11955. +-
  11956. +- spin_lock_bh(&ar->data_lock);
  11957. +- if (!ar->scan.in_progress) {
  11958. +- spin_unlock_bh(&ar->data_lock);
  11959. +- return;
  11960. +- }
  11961. ++ lockdep_assert_held(&ar->data_lock);
  11962. +
  11963. +- ath10k_warn("scan timed out, firmware problem?\n");
  11964. +-
  11965. +- if (ar->scan.is_roc)
  11966. +- ieee80211_remain_on_channel_expired(ar->hw);
  11967. +- else
  11968. +- ieee80211_scan_completed(ar->hw, 1 /* aborted */);
  11969. ++ switch (ar->scan.state) {
  11970. ++ case ATH10K_SCAN_IDLE:
  11971. ++ break;
  11972. ++ case ATH10K_SCAN_RUNNING:
  11973. ++ if (ar->scan.is_roc)
  11974. ++ ieee80211_remain_on_channel_expired(ar->hw);
  11975. ++ /* fall through */
  11976. ++ case ATH10K_SCAN_ABORTING:
  11977. ++ if (!ar->scan.is_roc)
  11978. ++ ieee80211_scan_completed(ar->hw,
  11979. ++ (ar->scan.state ==
  11980. ++ ATH10K_SCAN_ABORTING));
  11981. ++ /* fall through */
  11982. ++ case ATH10K_SCAN_STARTING:
  11983. ++ ar->scan.state = ATH10K_SCAN_IDLE;
  11984. ++ ar->scan_channel = NULL;
  11985. ++ ath10k_offchan_tx_purge(ar);
  11986. ++ cancel_delayed_work(&ar->scan.timeout);
  11987. ++ complete_all(&ar->scan.completed);
  11988. ++ break;
  11989. ++ }
  11990. ++}
  11991. +
  11992. +- ar->scan.in_progress = false;
  11993. +- complete_all(&ar->scan.completed);
  11994. ++void ath10k_scan_finish(struct ath10k *ar)
  11995. ++{
  11996. ++ spin_lock_bh(&ar->data_lock);
  11997. ++ __ath10k_scan_finish(ar);
  11998. + spin_unlock_bh(&ar->data_lock);
  11999. + }
  12000. +
  12001. +-static int ath10k_abort_scan(struct ath10k *ar)
  12002. ++static int ath10k_scan_stop(struct ath10k *ar)
  12003. + {
  12004. + struct wmi_stop_scan_arg arg = {
  12005. + .req_id = 1, /* FIXME */
  12006. +@@ -2165,47 +2573,79 @@ static int ath10k_abort_scan(struct ath1
  12007. +
  12008. + lockdep_assert_held(&ar->conf_mutex);
  12009. +
  12010. +- del_timer_sync(&ar->scan.timeout);
  12011. ++ ret = ath10k_wmi_stop_scan(ar, &arg);
  12012. ++ if (ret) {
  12013. ++ ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret);
  12014. ++ goto out;
  12015. ++ }
  12016. +
  12017. +- spin_lock_bh(&ar->data_lock);
  12018. +- if (!ar->scan.in_progress) {
  12019. +- spin_unlock_bh(&ar->data_lock);
  12020. +- return 0;
  12021. ++ ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ);
  12022. ++ if (ret == 0) {
  12023. ++ ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n");
  12024. ++ ret = -ETIMEDOUT;
  12025. ++ } else if (ret > 0) {
  12026. ++ ret = 0;
  12027. + }
  12028. +
  12029. +- ar->scan.aborting = true;
  12030. ++out:
  12031. ++ /* Scan state should be updated upon scan completion but in case
  12032. ++ * firmware fails to deliver the event (for whatever reason) it is
  12033. ++ * desired to clean up scan state anyway. Firmware may have just
  12034. ++ * dropped the scan completion event delivery due to transport pipe
  12035. ++ * being overflown with data and/or it can recover on its own before
  12036. ++ * next scan request is submitted.
  12037. ++ */
  12038. ++ spin_lock_bh(&ar->data_lock);
  12039. ++ if (ar->scan.state != ATH10K_SCAN_IDLE)
  12040. ++ __ath10k_scan_finish(ar);
  12041. + spin_unlock_bh(&ar->data_lock);
  12042. +
  12043. +- ret = ath10k_wmi_stop_scan(ar, &arg);
  12044. +- if (ret) {
  12045. +- ath10k_warn("failed to stop wmi scan: %d\n", ret);
  12046. +- spin_lock_bh(&ar->data_lock);
  12047. +- ar->scan.in_progress = false;
  12048. +- ath10k_offchan_tx_purge(ar);
  12049. +- spin_unlock_bh(&ar->data_lock);
  12050. +- return -EIO;
  12051. +- }
  12052. ++ return ret;
  12053. ++}
  12054. +
  12055. +- ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ);
  12056. +- if (ret == 0)
  12057. +- ath10k_warn("timed out while waiting for scan to stop\n");
  12058. ++static void ath10k_scan_abort(struct ath10k *ar)
  12059. ++{
  12060. ++ int ret;
  12061. +
  12062. +- /* scan completion may be done right after we timeout here, so let's
  12063. +- * check the in_progress and tell mac80211 scan is completed. if we
  12064. +- * don't do that and FW fails to send us scan completion indication
  12065. +- * then userspace won't be able to scan anymore */
  12066. +- ret = 0;
  12067. ++ lockdep_assert_held(&ar->conf_mutex);
  12068. +
  12069. + spin_lock_bh(&ar->data_lock);
  12070. +- if (ar->scan.in_progress) {
  12071. +- ath10k_warn("failed to stop scan, it's still in progress\n");
  12072. +- ar->scan.in_progress = false;
  12073. +- ath10k_offchan_tx_purge(ar);
  12074. +- ret = -ETIMEDOUT;
  12075. ++
  12076. ++ switch (ar->scan.state) {
  12077. ++ case ATH10K_SCAN_IDLE:
  12078. ++ /* This can happen if timeout worker kicked in and called
  12079. ++ * abortion while scan completion was being processed.
  12080. ++ */
  12081. ++ break;
  12082. ++ case ATH10K_SCAN_STARTING:
  12083. ++ case ATH10K_SCAN_ABORTING:
  12084. ++ ath10k_warn(ar, "refusing scan abortion due to invalid scan state: %s (%d)\n",
  12085. ++ ath10k_scan_state_str(ar->scan.state),
  12086. ++ ar->scan.state);
  12087. ++ break;
  12088. ++ case ATH10K_SCAN_RUNNING:
  12089. ++ ar->scan.state = ATH10K_SCAN_ABORTING;
  12090. ++ spin_unlock_bh(&ar->data_lock);
  12091. ++
  12092. ++ ret = ath10k_scan_stop(ar);
  12093. ++ if (ret)
  12094. ++ ath10k_warn(ar, "failed to abort scan: %d\n", ret);
  12095. ++
  12096. ++ spin_lock_bh(&ar->data_lock);
  12097. ++ break;
  12098. + }
  12099. ++
  12100. + spin_unlock_bh(&ar->data_lock);
  12101. ++}
  12102. +
  12103. +- return ret;
  12104. ++void ath10k_scan_timeout_work(struct work_struct *work)
  12105. ++{
  12106. ++ struct ath10k *ar = container_of(work, struct ath10k,
  12107. ++ scan.timeout.work);
  12108. ++
  12109. ++ mutex_lock(&ar->conf_mutex);
  12110. ++ ath10k_scan_abort(ar);
  12111. ++ mutex_unlock(&ar->conf_mutex);
  12112. + }
  12113. +
  12114. + static int ath10k_start_scan(struct ath10k *ar,
  12115. +@@ -2221,17 +2661,27 @@ static int ath10k_start_scan(struct ath1
  12116. +
  12117. + ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ);
  12118. + if (ret == 0) {
  12119. +- ath10k_abort_scan(ar);
  12120. +- return ret;
  12121. ++ ret = ath10k_scan_stop(ar);
  12122. ++ if (ret)
  12123. ++ ath10k_warn(ar, "failed to stop scan: %d\n", ret);
  12124. ++
  12125. ++ return -ETIMEDOUT;
  12126. ++ }
  12127. ++
  12128. ++ /* If we failed to start the scan, return error code at
  12129. ++ * this point. This is probably due to some issue in the
  12130. ++ * firmware, but no need to wedge the driver due to that...
  12131. ++ */
  12132. ++ spin_lock_bh(&ar->data_lock);
  12133. ++ if (ar->scan.state == ATH10K_SCAN_IDLE) {
  12134. ++ spin_unlock_bh(&ar->data_lock);
  12135. ++ return -EINVAL;
  12136. + }
  12137. ++ spin_unlock_bh(&ar->data_lock);
  12138. +
  12139. +- /* the scan can complete earlier, before we even
  12140. +- * start the timer. in that case the timer handler
  12141. +- * checks ar->scan.in_progress and bails out if its
  12142. +- * false. Add a 200ms margin to account event/command
  12143. +- * processing. */
  12144. +- mod_timer(&ar->scan.timeout, jiffies +
  12145. +- msecs_to_jiffies(arg->max_scan_time+200));
  12146. ++ /* Add a 200ms margin to account for event/command processing */
  12147. ++ ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
  12148. ++ msecs_to_jiffies(arg->max_scan_time+200));
  12149. + return 0;
  12150. + }
  12151. +
  12152. +@@ -2243,90 +2693,163 @@ static void ath10k_tx(struct ieee80211_h
  12153. + struct ieee80211_tx_control *control,
  12154. + struct sk_buff *skb)
  12155. + {
  12156. ++ struct ath10k *ar = hw->priv;
  12157. + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
  12158. ++ struct ieee80211_vif *vif = info->control.vif;
  12159. + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
  12160. +- struct ath10k *ar = hw->priv;
  12161. +- u8 tid, vdev_id;
  12162. +
  12163. + /* We should disable CCK RATE due to P2P */
  12164. + if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
  12165. +- ath10k_dbg(ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
  12166. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
  12167. +
  12168. +- /* we must calculate tid before we apply qos workaround
  12169. +- * as we'd lose the qos control field */
  12170. +- tid = ath10k_tx_h_get_tid(hdr);
  12171. +- vdev_id = ath10k_tx_h_get_vdev_id(ar, info);
  12172. ++ ATH10K_SKB_CB(skb)->htt.is_offchan = false;
  12173. ++ ATH10K_SKB_CB(skb)->htt.tid = ath10k_tx_h_get_tid(hdr);
  12174. ++ ATH10K_SKB_CB(skb)->vdev_id = ath10k_tx_h_get_vdev_id(ar, vif);
  12175. +
  12176. + /* it makes no sense to process injected frames like that */
  12177. +- if (info->control.vif &&
  12178. +- info->control.vif->type != NL80211_IFTYPE_MONITOR) {
  12179. +- ath10k_tx_h_qos_workaround(hw, control, skb);
  12180. +- ath10k_tx_h_update_wep_key(skb);
  12181. +- ath10k_tx_h_add_p2p_noa_ie(ar, skb);
  12182. +- ath10k_tx_h_seq_no(skb);
  12183. ++ if (vif && vif->type != NL80211_IFTYPE_MONITOR) {
  12184. ++ ath10k_tx_h_nwifi(hw, skb);
  12185. ++ ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
  12186. ++ ath10k_tx_h_seq_no(vif, skb);
  12187. + }
  12188. +
  12189. +- ATH10K_SKB_CB(skb)->vdev_id = vdev_id;
  12190. +- ATH10K_SKB_CB(skb)->htt.is_offchan = false;
  12191. +- ATH10K_SKB_CB(skb)->htt.tid = tid;
  12192. +-
  12193. + if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
  12194. + spin_lock_bh(&ar->data_lock);
  12195. +- ATH10K_SKB_CB(skb)->htt.is_offchan = true;
  12196. ++ ATH10K_SKB_CB(skb)->htt.freq = ar->scan.roc_freq;
  12197. + ATH10K_SKB_CB(skb)->vdev_id = ar->scan.vdev_id;
  12198. + spin_unlock_bh(&ar->data_lock);
  12199. +
  12200. +- ath10k_dbg(ATH10K_DBG_MAC, "queued offchannel skb %p\n", skb);
  12201. ++ if (ath10k_mac_need_offchan_tx_work(ar)) {
  12202. ++ ATH10K_SKB_CB(skb)->htt.freq = 0;
  12203. ++ ATH10K_SKB_CB(skb)->htt.is_offchan = true;
  12204. +
  12205. +- skb_queue_tail(&ar->offchan_tx_queue, skb);
  12206. +- ieee80211_queue_work(hw, &ar->offchan_tx_work);
  12207. +- return;
  12208. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n",
  12209. ++ skb);
  12210. ++
  12211. ++ skb_queue_tail(&ar->offchan_tx_queue, skb);
  12212. ++ ieee80211_queue_work(hw, &ar->offchan_tx_work);
  12213. ++ return;
  12214. ++ }
  12215. + }
  12216. +
  12217. + ath10k_tx_htt(ar, skb);
  12218. + }
  12219. +
  12220. +-/*
  12221. +- * Initialize various parameters with default vaules.
  12222. +- */
  12223. ++/* Must not be called with conf_mutex held as workers can use that also. */
  12224. ++void ath10k_drain_tx(struct ath10k *ar)
  12225. ++{
  12226. ++ /* make sure rcu-protected mac80211 tx path itself is drained */
  12227. ++ synchronize_net();
  12228. ++
  12229. ++ ath10k_offchan_tx_purge(ar);
  12230. ++ ath10k_mgmt_over_wmi_tx_purge(ar);
  12231. ++
  12232. ++ cancel_work_sync(&ar->offchan_tx_work);
  12233. ++ cancel_work_sync(&ar->wmi_mgmt_tx_work);
  12234. ++}
  12235. ++
  12236. + void ath10k_halt(struct ath10k *ar)
  12237. + {
  12238. + struct ath10k_vif *arvif;
  12239. +
  12240. + lockdep_assert_held(&ar->conf_mutex);
  12241. +
  12242. +- if (ath10k_monitor_is_enabled(ar)) {
  12243. +- clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
  12244. +- ar->promisc = false;
  12245. +- ar->monitor = false;
  12246. ++ clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
  12247. ++ ar->filter_flags = 0;
  12248. ++ ar->monitor = false;
  12249. ++
  12250. ++ if (ar->monitor_started)
  12251. + ath10k_monitor_stop(ar);
  12252. +- }
  12253. +
  12254. +- del_timer_sync(&ar->scan.timeout);
  12255. +- ath10k_offchan_tx_purge(ar);
  12256. +- ath10k_mgmt_over_wmi_tx_purge(ar);
  12257. ++ ar->monitor_started = false;
  12258. ++
  12259. ++ ath10k_scan_finish(ar);
  12260. + ath10k_peer_cleanup_all(ar);
  12261. + ath10k_core_stop(ar);
  12262. + ath10k_hif_power_down(ar);
  12263. +
  12264. + spin_lock_bh(&ar->data_lock);
  12265. +- if (ar->scan.in_progress) {
  12266. +- del_timer(&ar->scan.timeout);
  12267. +- ar->scan.in_progress = false;
  12268. +- ieee80211_scan_completed(ar->hw, true);
  12269. ++ list_for_each_entry(arvif, &ar->arvifs, list)
  12270. ++ ath10k_mac_vif_beacon_cleanup(arvif);
  12271. ++ spin_unlock_bh(&ar->data_lock);
  12272. ++}
  12273. ++
  12274. ++static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
  12275. ++{
  12276. ++ struct ath10k *ar = hw->priv;
  12277. ++
  12278. ++ mutex_lock(&ar->conf_mutex);
  12279. ++
  12280. ++ if (ar->cfg_tx_chainmask) {
  12281. ++ *tx_ant = ar->cfg_tx_chainmask;
  12282. ++ *rx_ant = ar->cfg_rx_chainmask;
  12283. ++ } else {
  12284. ++ *tx_ant = ar->supp_tx_chainmask;
  12285. ++ *rx_ant = ar->supp_rx_chainmask;
  12286. + }
  12287. +
  12288. +- list_for_each_entry(arvif, &ar->arvifs, list) {
  12289. +- if (!arvif->beacon)
  12290. +- continue;
  12291. ++ mutex_unlock(&ar->conf_mutex);
  12292. +
  12293. +- dma_unmap_single(arvif->ar->dev,
  12294. +- ATH10K_SKB_CB(arvif->beacon)->paddr,
  12295. +- arvif->beacon->len, DMA_TO_DEVICE);
  12296. +- dev_kfree_skb_any(arvif->beacon);
  12297. +- arvif->beacon = NULL;
  12298. ++ return 0;
  12299. ++}
  12300. ++
  12301. ++static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg)
  12302. ++{
  12303. ++ /* It is not clear that allowing gaps in chainmask
  12304. ++ * is helpful. Probably it will not do what user
  12305. ++ * is hoping for, so warn in that case.
  12306. ++ */
  12307. ++ if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0)
  12308. ++ return;
  12309. ++
  12310. ++ ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x. Suggested values: 15, 7, 3, 1 or 0.\n",
  12311. ++ dbg, cm);
  12312. ++}
  12313. ++
  12314. ++static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
  12315. ++{
  12316. ++ int ret;
  12317. ++
  12318. ++ lockdep_assert_held(&ar->conf_mutex);
  12319. ++
  12320. ++ ath10k_check_chain_mask(ar, tx_ant, "tx");
  12321. ++ ath10k_check_chain_mask(ar, rx_ant, "rx");
  12322. ++
  12323. ++ ar->cfg_tx_chainmask = tx_ant;
  12324. ++ ar->cfg_rx_chainmask = rx_ant;
  12325. ++
  12326. ++ if ((ar->state != ATH10K_STATE_ON) &&
  12327. ++ (ar->state != ATH10K_STATE_RESTARTED))
  12328. ++ return 0;
  12329. ++
  12330. ++ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask,
  12331. ++ tx_ant);
  12332. ++ if (ret) {
  12333. ++ ath10k_warn(ar, "failed to set tx-chainmask: %d, req 0x%x\n",
  12334. ++ ret, tx_ant);
  12335. ++ return ret;
  12336. + }
  12337. +- spin_unlock_bh(&ar->data_lock);
  12338. ++
  12339. ++ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask,
  12340. ++ rx_ant);
  12341. ++ if (ret) {
  12342. ++ ath10k_warn(ar, "failed to set rx-chainmask: %d, req 0x%x\n",
  12343. ++ ret, rx_ant);
  12344. ++ return ret;
  12345. ++ }
  12346. ++
  12347. ++ return 0;
  12348. ++}
  12349. ++
  12350. ++static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
  12351. ++{
  12352. ++ struct ath10k *ar = hw->priv;
  12353. ++ int ret;
  12354. ++
  12355. ++ mutex_lock(&ar->conf_mutex);
  12356. ++ ret = __ath10k_set_antenna(ar, tx_ant, rx_ant);
  12357. ++ mutex_unlock(&ar->conf_mutex);
  12358. ++ return ret;
  12359. + }
  12360. +
  12361. + static int ath10k_start(struct ieee80211_hw *hw)
  12362. +@@ -2334,41 +2857,61 @@ static int ath10k_start(struct ieee80211
  12363. + struct ath10k *ar = hw->priv;
  12364. + int ret = 0;
  12365. +
  12366. ++ /*
  12367. ++ * This makes sense only when restarting hw. It is harmless to call
  12368. ++ * unconditionally. This is necessary to make sure no HTT/WMI tx
  12369. ++ * commands will be submitted while restarting.
  12370. ++ */
  12371. ++ ath10k_drain_tx(ar);
  12372. ++
  12373. + mutex_lock(&ar->conf_mutex);
  12374. +
  12375. +- if (ar->state != ATH10K_STATE_OFF &&
  12376. +- ar->state != ATH10K_STATE_RESTARTING) {
  12377. ++ switch (ar->state) {
  12378. ++ case ATH10K_STATE_OFF:
  12379. ++ ar->state = ATH10K_STATE_ON;
  12380. ++ break;
  12381. ++ case ATH10K_STATE_RESTARTING:
  12382. ++ ath10k_halt(ar);
  12383. ++ ar->state = ATH10K_STATE_RESTARTED;
  12384. ++ break;
  12385. ++ case ATH10K_STATE_ON:
  12386. ++ case ATH10K_STATE_RESTARTED:
  12387. ++ case ATH10K_STATE_WEDGED:
  12388. ++ WARN_ON(1);
  12389. + ret = -EINVAL;
  12390. +- goto exit;
  12391. ++ goto err;
  12392. ++ case ATH10K_STATE_UTF:
  12393. ++ ret = -EBUSY;
  12394. ++ goto err;
  12395. + }
  12396. +
  12397. + ret = ath10k_hif_power_up(ar);
  12398. + if (ret) {
  12399. +- ath10k_err("Could not init hif: %d\n", ret);
  12400. +- ar->state = ATH10K_STATE_OFF;
  12401. +- goto exit;
  12402. ++ ath10k_err(ar, "Could not init hif: %d\n", ret);
  12403. ++ goto err_off;
  12404. + }
  12405. +
  12406. +- ret = ath10k_core_start(ar);
  12407. ++ ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL);
  12408. + if (ret) {
  12409. +- ath10k_err("Could not init core: %d\n", ret);
  12410. +- ath10k_hif_power_down(ar);
  12411. +- ar->state = ATH10K_STATE_OFF;
  12412. +- goto exit;
  12413. ++ ath10k_err(ar, "Could not init core: %d\n", ret);
  12414. ++ goto err_power_down;
  12415. + }
  12416. +
  12417. +- if (ar->state == ATH10K_STATE_OFF)
  12418. +- ar->state = ATH10K_STATE_ON;
  12419. +- else if (ar->state == ATH10K_STATE_RESTARTING)
  12420. +- ar->state = ATH10K_STATE_RESTARTED;
  12421. +-
  12422. + ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1);
  12423. +- if (ret)
  12424. +- ath10k_warn("failed to enable PMF QOS: %d\n", ret);
  12425. ++ if (ret) {
  12426. ++ ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret);
  12427. ++ goto err_core_stop;
  12428. ++ }
  12429. +
  12430. + ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 1);
  12431. +- if (ret)
  12432. +- ath10k_warn("failed to enable dynamic BW: %d\n", ret);
  12433. ++ if (ret) {
  12434. ++ ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret);
  12435. ++ goto err_core_stop;
  12436. ++ }
  12437. ++
  12438. ++ if (ar->cfg_tx_chainmask)
  12439. ++ __ath10k_set_antenna(ar, ar->cfg_tx_chainmask,
  12440. ++ ar->cfg_rx_chainmask);
  12441. +
  12442. + /*
  12443. + * By default FW set ARP frames ac to voice (6). In that case ARP
  12444. +@@ -2382,16 +2925,29 @@ static int ath10k_start(struct ieee80211
  12445. + ret = ath10k_wmi_pdev_set_param(ar,
  12446. + ar->wmi.pdev_param->arp_ac_override, 0);
  12447. + if (ret) {
  12448. +- ath10k_warn("failed to set arp ac override parameter: %d\n",
  12449. ++ ath10k_warn(ar, "failed to set arp ac override parameter: %d\n",
  12450. + ret);
  12451. +- goto exit;
  12452. ++ goto err_core_stop;
  12453. + }
  12454. +
  12455. + ar->num_started_vdevs = 0;
  12456. + ath10k_regd_update(ar);
  12457. +- ret = 0;
  12458. +
  12459. +-exit:
  12460. ++ ath10k_spectral_start(ar);
  12461. ++
  12462. ++ mutex_unlock(&ar->conf_mutex);
  12463. ++ return 0;
  12464. ++
  12465. ++err_core_stop:
  12466. ++ ath10k_core_stop(ar);
  12467. ++
  12468. ++err_power_down:
  12469. ++ ath10k_hif_power_down(ar);
  12470. ++
  12471. ++err_off:
  12472. ++ ar->state = ATH10K_STATE_OFF;
  12473. ++
  12474. ++err:
  12475. + mutex_unlock(&ar->conf_mutex);
  12476. + return ret;
  12477. + }
  12478. +@@ -2400,19 +2956,16 @@ static void ath10k_stop(struct ieee80211
  12479. + {
  12480. + struct ath10k *ar = hw->priv;
  12481. +
  12482. ++ ath10k_drain_tx(ar);
  12483. ++
  12484. + mutex_lock(&ar->conf_mutex);
  12485. +- if (ar->state == ATH10K_STATE_ON ||
  12486. +- ar->state == ATH10K_STATE_RESTARTED ||
  12487. +- ar->state == ATH10K_STATE_WEDGED)
  12488. ++ if (ar->state != ATH10K_STATE_OFF) {
  12489. + ath10k_halt(ar);
  12490. +-
  12491. +- ar->state = ATH10K_STATE_OFF;
  12492. ++ ar->state = ATH10K_STATE_OFF;
  12493. ++ }
  12494. + mutex_unlock(&ar->conf_mutex);
  12495. +
  12496. +- ath10k_mgmt_over_wmi_tx_purge(ar);
  12497. +-
  12498. +- cancel_work_sync(&ar->offchan_tx_work);
  12499. +- cancel_work_sync(&ar->wmi_mgmt_tx_work);
  12500. ++ cancel_delayed_work_sync(&ar->scan.timeout);
  12501. + cancel_work_sync(&ar->restart_work);
  12502. + }
  12503. +
  12504. +@@ -2426,7 +2979,7 @@ static int ath10k_config_ps(struct ath10
  12505. + list_for_each_entry(arvif, &ar->arvifs, list) {
  12506. + ret = ath10k_mac_vif_setup_ps(arvif);
  12507. + if (ret) {
  12508. +- ath10k_warn("failed to setup powersave: %d\n", ret);
  12509. ++ ath10k_warn(ar, "failed to setup powersave: %d\n", ret);
  12510. + break;
  12511. + }
  12512. + }
  12513. +@@ -2464,7 +3017,7 @@ static void ath10k_config_chan(struct at
  12514. +
  12515. + lockdep_assert_held(&ar->conf_mutex);
  12516. +
  12517. +- ath10k_dbg(ATH10K_DBG_MAC,
  12518. ++ ath10k_dbg(ar, ATH10K_DBG_MAC,
  12519. + "mac config channel to %dMHz (cf1 %dMHz cf2 %dMHz width %s)\n",
  12520. + ar->chandef.chan->center_freq,
  12521. + ar->chandef.center_freq1,
  12522. +@@ -2474,24 +3027,27 @@ static void ath10k_config_chan(struct at
  12523. + /* First stop monitor interface. Some FW versions crash if there's a
  12524. + * lone monitor interface. */
  12525. + if (ar->monitor_started)
  12526. +- ath10k_monitor_vdev_stop(ar);
  12527. ++ ath10k_monitor_stop(ar);
  12528. +
  12529. + list_for_each_entry(arvif, &ar->arvifs, list) {
  12530. + if (!arvif->is_started)
  12531. + continue;
  12532. +
  12533. ++ if (!arvif->is_up)
  12534. ++ continue;
  12535. ++
  12536. + if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
  12537. + continue;
  12538. +
  12539. +- ret = ath10k_vdev_stop(arvif);
  12540. ++ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
  12541. + if (ret) {
  12542. +- ath10k_warn("failed to stop vdev %d: %d\n",
  12543. ++ ath10k_warn(ar, "failed to down vdev %d: %d\n",
  12544. + arvif->vdev_id, ret);
  12545. + continue;
  12546. + }
  12547. + }
  12548. +
  12549. +- /* all vdevs are now stopped - now attempt to restart them */
  12550. ++ /* all vdevs are downed now - attempt to restart and re-up them */
  12551. +
  12552. + list_for_each_entry(arvif, &ar->arvifs, list) {
  12553. + if (!arvif->is_started)
  12554. +@@ -2500,9 +3056,9 @@ static void ath10k_config_chan(struct at
  12555. + if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
  12556. + continue;
  12557. +
  12558. +- ret = ath10k_vdev_start(arvif);
  12559. ++ ret = ath10k_vdev_restart(arvif);
  12560. + if (ret) {
  12561. +- ath10k_warn("failed to start vdev %d: %d\n",
  12562. ++ ath10k_warn(ar, "failed to restart vdev %d: %d\n",
  12563. + arvif->vdev_id, ret);
  12564. + continue;
  12565. + }
  12566. +@@ -2513,14 +3069,70 @@ static void ath10k_config_chan(struct at
  12567. + ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
  12568. + arvif->bssid);
  12569. + if (ret) {
  12570. +- ath10k_warn("failed to bring vdev up %d: %d\n",
  12571. ++ ath10k_warn(ar, "failed to bring vdev up %d: %d\n",
  12572. + arvif->vdev_id, ret);
  12573. + continue;
  12574. + }
  12575. + }
  12576. +
  12577. +- if (ath10k_monitor_is_enabled(ar))
  12578. +- ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
  12579. ++ ath10k_monitor_recalc(ar);
  12580. ++}
  12581. ++
  12582. ++static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower)
  12583. ++{
  12584. ++ int ret;
  12585. ++ u32 param;
  12586. ++
  12587. ++ lockdep_assert_held(&ar->conf_mutex);
  12588. ++
  12589. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", txpower);
  12590. ++
  12591. ++ param = ar->wmi.pdev_param->txpower_limit2g;
  12592. ++ ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
  12593. ++ if (ret) {
  12594. ++ ath10k_warn(ar, "failed to set 2g txpower %d: %d\n",
  12595. ++ txpower, ret);
  12596. ++ return ret;
  12597. ++ }
  12598. ++
  12599. ++ param = ar->wmi.pdev_param->txpower_limit5g;
  12600. ++ ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
  12601. ++ if (ret) {
  12602. ++ ath10k_warn(ar, "failed to set 5g txpower %d: %d\n",
  12603. ++ txpower, ret);
  12604. ++ return ret;
  12605. ++ }
  12606. ++
  12607. ++ return 0;
  12608. ++}
  12609. ++
  12610. ++static int ath10k_mac_txpower_recalc(struct ath10k *ar)
  12611. ++{
  12612. ++ struct ath10k_vif *arvif;
  12613. ++ int ret, txpower = -1;
  12614. ++
  12615. ++ lockdep_assert_held(&ar->conf_mutex);
  12616. ++
  12617. ++ list_for_each_entry(arvif, &ar->arvifs, list) {
  12618. ++ WARN_ON(arvif->txpower < 0);
  12619. ++
  12620. ++ if (txpower == -1)
  12621. ++ txpower = arvif->txpower;
  12622. ++ else
  12623. ++ txpower = min(txpower, arvif->txpower);
  12624. ++ }
  12625. ++
  12626. ++ if (WARN_ON(txpower == -1))
  12627. ++ return -EINVAL;
  12628. ++
  12629. ++ ret = ath10k_mac_txpower_setup(ar, txpower);
  12630. ++ if (ret) {
  12631. ++ ath10k_warn(ar, "failed to setup tx power %d: %d\n",
  12632. ++ txpower, ret);
  12633. ++ return ret;
  12634. ++ }
  12635. ++
  12636. ++ return 0;
  12637. + }
  12638. +
  12639. + static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
  12640. +@@ -2528,12 +3140,11 @@ static int ath10k_config(struct ieee8021
  12641. + struct ath10k *ar = hw->priv;
  12642. + struct ieee80211_conf *conf = &hw->conf;
  12643. + int ret = 0;
  12644. +- u32 param;
  12645. +
  12646. + mutex_lock(&ar->conf_mutex);
  12647. +
  12648. + if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
  12649. +- ath10k_dbg(ATH10K_DBG_MAC,
  12650. ++ ath10k_dbg(ar, ATH10K_DBG_MAC,
  12651. + "mac config channel %dMHz flags 0x%x radar %d\n",
  12652. + conf->chandef.chan->center_freq,
  12653. + conf->chandef.chan->flags,
  12654. +@@ -2552,48 +3163,31 @@ static int ath10k_config(struct ieee8021
  12655. + }
  12656. + }
  12657. +
  12658. +- if (changed & IEEE80211_CONF_CHANGE_POWER) {
  12659. +- ath10k_dbg(ATH10K_DBG_MAC, "mac config power %d\n",
  12660. +- hw->conf.power_level);
  12661. +-
  12662. +- param = ar->wmi.pdev_param->txpower_limit2g;
  12663. +- ret = ath10k_wmi_pdev_set_param(ar, param,
  12664. +- hw->conf.power_level * 2);
  12665. +- if (ret)
  12666. +- ath10k_warn("failed to set 2g txpower %d: %d\n",
  12667. +- hw->conf.power_level, ret);
  12668. +-
  12669. +- param = ar->wmi.pdev_param->txpower_limit5g;
  12670. +- ret = ath10k_wmi_pdev_set_param(ar, param,
  12671. +- hw->conf.power_level * 2);
  12672. +- if (ret)
  12673. +- ath10k_warn("failed to set 5g txpower %d: %d\n",
  12674. +- hw->conf.power_level, ret);
  12675. +- }
  12676. +-
  12677. + if (changed & IEEE80211_CONF_CHANGE_PS)
  12678. + ath10k_config_ps(ar);
  12679. +
  12680. + if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
  12681. +- if (conf->flags & IEEE80211_CONF_MONITOR && !ar->monitor) {
  12682. +- ar->monitor = true;
  12683. +- ret = ath10k_monitor_start(ar);
  12684. +- if (ret) {
  12685. +- ath10k_warn("failed to start monitor (config): %d\n",
  12686. +- ret);
  12687. +- ar->monitor = false;
  12688. +- }
  12689. +- } else if (!(conf->flags & IEEE80211_CONF_MONITOR) &&
  12690. +- ar->monitor) {
  12691. +- ar->monitor = false;
  12692. +- ath10k_monitor_stop(ar);
  12693. +- }
  12694. ++ ar->monitor = conf->flags & IEEE80211_CONF_MONITOR;
  12695. ++ ret = ath10k_monitor_recalc(ar);
  12696. ++ if (ret)
  12697. ++ ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
  12698. + }
  12699. +
  12700. + mutex_unlock(&ar->conf_mutex);
  12701. + return ret;
  12702. + }
  12703. +
  12704. ++static u32 get_nss_from_chainmask(u16 chain_mask)
  12705. ++{
  12706. ++ if ((chain_mask & 0x15) == 0x15)
  12707. ++ return 4;
  12708. ++ else if ((chain_mask & 0x7) == 0x7)
  12709. ++ return 3;
  12710. ++ else if ((chain_mask & 0x3) == 0x3)
  12711. ++ return 2;
  12712. ++ return 1;
  12713. ++}
  12714. ++
  12715. + /*
  12716. + * TODO:
  12717. + * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
  12718. +@@ -2619,22 +3213,26 @@ static int ath10k_add_interface(struct i
  12719. + arvif->ar = ar;
  12720. + arvif->vif = vif;
  12721. +
  12722. +- INIT_WORK(&arvif->wep_key_work, ath10k_tx_wep_key_work);
  12723. + INIT_LIST_HEAD(&arvif->list);
  12724. +
  12725. +- bit = ffs(ar->free_vdev_map);
  12726. +- if (bit == 0) {
  12727. ++ if (ar->free_vdev_map == 0) {
  12728. ++ ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n");
  12729. + ret = -EBUSY;
  12730. + goto err;
  12731. + }
  12732. ++ bit = __ffs64(ar->free_vdev_map);
  12733. +
  12734. +- arvif->vdev_id = bit - 1;
  12735. +- arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
  12736. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n",
  12737. ++ bit, ar->free_vdev_map);
  12738. +
  12739. +- if (ar->p2p)
  12740. +- arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
  12741. ++ arvif->vdev_id = bit;
  12742. ++ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
  12743. +
  12744. + switch (vif->type) {
  12745. ++ case NL80211_IFTYPE_P2P_DEVICE:
  12746. ++ arvif->vdev_type = WMI_VDEV_TYPE_STA;
  12747. ++ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
  12748. ++ break;
  12749. + case NL80211_IFTYPE_UNSPECIFIED:
  12750. + case NL80211_IFTYPE_STATION:
  12751. + arvif->vdev_type = WMI_VDEV_TYPE_STA;
  12752. +@@ -2658,50 +3256,98 @@ static int ath10k_add_interface(struct i
  12753. + break;
  12754. + }
  12755. +
  12756. +- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d\n",
  12757. +- arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype);
  12758. ++ /* Some firmware revisions don't wait for beacon tx completion before
  12759. ++ * sending another SWBA event. This could lead to hardware using old
  12760. ++ * (freed) beacon data in some cases, e.g. tx credit starvation
  12761. ++ * combined with missed TBTT. This is very very rare.
  12762. ++ *
  12763. ++ * On non-IOMMU-enabled hosts this could be a possible security issue
  12764. ++ * because hw could beacon some random data on the air. On
  12765. ++ * IOMMU-enabled hosts DMAR faults would occur in most cases and target
  12766. ++ * device would crash.
  12767. ++ *
  12768. ++ * Since there are no beacon tx completions (implicit nor explicit)
  12769. ++ * propagated to host the only workaround for this is to allocate a
  12770. ++ * DMA-coherent buffer for a lifetime of a vif and use it for all
  12771. ++ * beacon tx commands. Worst case for this approach is some beacons may
  12772. ++ * become corrupted, e.g. have garbled IEs or out-of-date TIM bitmap.
  12773. ++ */
  12774. ++ if (vif->type == NL80211_IFTYPE_ADHOC ||
  12775. ++ vif->type == NL80211_IFTYPE_AP) {
  12776. ++ arvif->beacon_buf = dma_zalloc_coherent(ar->dev,
  12777. ++ IEEE80211_MAX_FRAME_LEN,
  12778. ++ &arvif->beacon_paddr,
  12779. ++ GFP_ATOMIC);
  12780. ++ if (!arvif->beacon_buf) {
  12781. ++ ret = -ENOMEM;
  12782. ++ ath10k_warn(ar, "failed to allocate beacon buffer: %d\n",
  12783. ++ ret);
  12784. ++ goto err;
  12785. ++ }
  12786. ++ }
  12787. ++
  12788. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n",
  12789. ++ arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
  12790. ++ arvif->beacon_buf ? "single-buf" : "per-skb");
  12791. +
  12792. + ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
  12793. + arvif->vdev_subtype, vif->addr);
  12794. + if (ret) {
  12795. +- ath10k_warn("failed to create WMI vdev %i: %d\n",
  12796. ++ ath10k_warn(ar, "failed to create WMI vdev %i: %d\n",
  12797. + arvif->vdev_id, ret);
  12798. + goto err;
  12799. + }
  12800. +
  12801. +- ar->free_vdev_map &= ~BIT(arvif->vdev_id);
  12802. ++ ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
  12803. + list_add(&arvif->list, &ar->arvifs);
  12804. +
  12805. +- vdev_param = ar->wmi.vdev_param->def_keyid;
  12806. +- ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param,
  12807. +- arvif->def_wep_key_idx);
  12808. ++ /* It makes no sense to have firmware do keepalives. mac80211 already
  12809. ++ * takes care of this with idle connection polling.
  12810. ++ */
  12811. ++ ret = ath10k_mac_vif_disable_keepalive(arvif);
  12812. + if (ret) {
  12813. +- ath10k_warn("failed to set vdev %i default key id: %d\n",
  12814. ++ ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n",
  12815. + arvif->vdev_id, ret);
  12816. + goto err_vdev_delete;
  12817. + }
  12818. +
  12819. ++ arvif->def_wep_key_idx = -1;
  12820. ++
  12821. + vdev_param = ar->wmi.vdev_param->tx_encap_type;
  12822. + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
  12823. + ATH10K_HW_TXRX_NATIVE_WIFI);
  12824. + /* 10.X firmware does not support this VDEV parameter. Do not warn */
  12825. + if (ret && ret != -EOPNOTSUPP) {
  12826. +- ath10k_warn("failed to set vdev %i TX encapsulation: %d\n",
  12827. ++ ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n",
  12828. + arvif->vdev_id, ret);
  12829. + goto err_vdev_delete;
  12830. + }
  12831. +
  12832. ++ if (ar->cfg_tx_chainmask) {
  12833. ++ u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
  12834. ++
  12835. ++ vdev_param = ar->wmi.vdev_param->nss;
  12836. ++ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
  12837. ++ nss);
  12838. ++ if (ret) {
  12839. ++ ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n",
  12840. ++ arvif->vdev_id, ar->cfg_tx_chainmask, nss,
  12841. ++ ret);
  12842. ++ goto err_vdev_delete;
  12843. ++ }
  12844. ++ }
  12845. ++
  12846. + if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
  12847. + ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
  12848. + if (ret) {
  12849. +- ath10k_warn("failed to create vdev %i peer for AP: %d\n",
  12850. ++ ath10k_warn(ar, "failed to create vdev %i peer for AP: %d\n",
  12851. + arvif->vdev_id, ret);
  12852. + goto err_vdev_delete;
  12853. + }
  12854. +
  12855. + ret = ath10k_mac_set_kickout(arvif);
  12856. + if (ret) {
  12857. +- ath10k_warn("failed to set vdev %i kickout parameters: %d\n",
  12858. ++ ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n",
  12859. + arvif->vdev_id, ret);
  12860. + goto err_peer_delete;
  12861. + }
  12862. +@@ -2713,27 +3359,21 @@ static int ath10k_add_interface(struct i
  12863. + ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
  12864. + param, value);
  12865. + if (ret) {
  12866. +- ath10k_warn("failed to set vdev %i RX wake policy: %d\n",
  12867. ++ ath10k_warn(ar, "failed to set vdev %i RX wake policy: %d\n",
  12868. + arvif->vdev_id, ret);
  12869. + goto err_peer_delete;
  12870. + }
  12871. +
  12872. +- param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
  12873. +- value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
  12874. +- ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
  12875. +- param, value);
  12876. ++ ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
  12877. + if (ret) {
  12878. +- ath10k_warn("failed to set vdev %i TX wake thresh: %d\n",
  12879. ++ ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
  12880. + arvif->vdev_id, ret);
  12881. + goto err_peer_delete;
  12882. + }
  12883. +
  12884. +- param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
  12885. +- value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
  12886. +- ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
  12887. +- param, value);
  12888. ++ ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
  12889. + if (ret) {
  12890. +- ath10k_warn("failed to set vdev %i PSPOLL count: %d\n",
  12891. ++ ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
  12892. + arvif->vdev_id, ret);
  12893. + goto err_peer_delete;
  12894. + }
  12895. +@@ -2741,15 +3381,22 @@ static int ath10k_add_interface(struct i
  12896. +
  12897. + ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
  12898. + if (ret) {
  12899. +- ath10k_warn("failed to set rts threshold for vdev %d: %d\n",
  12900. ++ ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
  12901. ++ arvif->vdev_id, ret);
  12902. ++ goto err_peer_delete;
  12903. ++ }
  12904. ++
  12905. ++ ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold);
  12906. ++ if (ret) {
  12907. ++ ath10k_warn(ar, "failed to set frag threshold for vdev %d: %d\n",
  12908. + arvif->vdev_id, ret);
  12909. + goto err_peer_delete;
  12910. + }
  12911. +
  12912. +- ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold);
  12913. ++ arvif->txpower = vif->bss_conf.txpower;
  12914. ++ ret = ath10k_mac_txpower_recalc(ar);
  12915. + if (ret) {
  12916. +- ath10k_warn("failed to set frag threshold for vdev %d: %d\n",
  12917. +- arvif->vdev_id, ret);
  12918. ++ ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
  12919. + goto err_peer_delete;
  12920. + }
  12921. +
  12922. +@@ -2762,10 +3409,16 @@ err_peer_delete:
  12923. +
  12924. + err_vdev_delete:
  12925. + ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
  12926. +- ar->free_vdev_map &= ~BIT(arvif->vdev_id);
  12927. ++ ar->free_vdev_map |= 1LL << arvif->vdev_id;
  12928. + list_del(&arvif->list);
  12929. +
  12930. + err:
  12931. ++ if (arvif->beacon_buf) {
  12932. ++ dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
  12933. ++ arvif->beacon_buf, arvif->beacon_paddr);
  12934. ++ arvif->beacon_buf = NULL;
  12935. ++ }
  12936. ++
  12937. + mutex_unlock(&ar->conf_mutex);
  12938. +
  12939. + return ret;
  12940. +@@ -2780,38 +3433,51 @@ static void ath10k_remove_interface(stru
  12941. +
  12942. + mutex_lock(&ar->conf_mutex);
  12943. +
  12944. +- cancel_work_sync(&arvif->wep_key_work);
  12945. +-
  12946. + spin_lock_bh(&ar->data_lock);
  12947. +- if (arvif->beacon) {
  12948. +- dma_unmap_single(arvif->ar->dev,
  12949. +- ATH10K_SKB_CB(arvif->beacon)->paddr,
  12950. +- arvif->beacon->len, DMA_TO_DEVICE);
  12951. +- dev_kfree_skb_any(arvif->beacon);
  12952. +- arvif->beacon = NULL;
  12953. +- }
  12954. ++ ath10k_mac_vif_beacon_cleanup(arvif);
  12955. + spin_unlock_bh(&ar->data_lock);
  12956. +
  12957. +- ar->free_vdev_map |= 1 << (arvif->vdev_id);
  12958. ++ ret = ath10k_spectral_vif_stop(arvif);
  12959. ++ if (ret)
  12960. ++ ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n",
  12961. ++ arvif->vdev_id, ret);
  12962. ++
  12963. ++ ar->free_vdev_map |= 1LL << arvif->vdev_id;
  12964. + list_del(&arvif->list);
  12965. +
  12966. + if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
  12967. +- ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr);
  12968. ++ ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id,
  12969. ++ vif->addr);
  12970. + if (ret)
  12971. +- ath10k_warn("failed to remove peer for AP vdev %i: %d\n",
  12972. ++ ath10k_warn(ar, "failed to submit AP self-peer removal on vdev %i: %d\n",
  12973. + arvif->vdev_id, ret);
  12974. +
  12975. + kfree(arvif->u.ap.noa_data);
  12976. + }
  12977. +
  12978. +- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n",
  12979. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n",
  12980. + arvif->vdev_id);
  12981. +
  12982. + ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
  12983. + if (ret)
  12984. +- ath10k_warn("failed to delete WMI vdev %i: %d\n",
  12985. ++ ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n",
  12986. + arvif->vdev_id, ret);
  12987. +
  12988. ++ /* Some firmware revisions don't notify host about self-peer removal
  12989. ++ * until after associated vdev is deleted.
  12990. ++ */
  12991. ++ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
  12992. ++ ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id,
  12993. ++ vif->addr);
  12994. ++ if (ret)
  12995. ++ ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n",
  12996. ++ arvif->vdev_id, ret);
  12997. ++
  12998. ++ spin_lock_bh(&ar->data_lock);
  12999. ++ ar->num_peers--;
  13000. ++ spin_unlock_bh(&ar->data_lock);
  13001. ++ }
  13002. ++
  13003. + ath10k_peer_cleanup(ar, arvif->vdev_id);
  13004. +
  13005. + mutex_unlock(&ar->conf_mutex);
  13006. +@@ -2844,18 +3510,9 @@ static void ath10k_configure_filter(stru
  13007. + *total_flags &= SUPPORTED_FILTERS;
  13008. + ar->filter_flags = *total_flags;
  13009. +
  13010. +- if (ar->filter_flags & FIF_PROMISC_IN_BSS && !ar->promisc) {
  13011. +- ar->promisc = true;
  13012. +- ret = ath10k_monitor_start(ar);
  13013. +- if (ret) {
  13014. +- ath10k_warn("failed to start monitor (promisc): %d\n",
  13015. +- ret);
  13016. +- ar->promisc = false;
  13017. +- }
  13018. +- } else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) && ar->promisc) {
  13019. +- ar->promisc = false;
  13020. +- ath10k_monitor_stop(ar);
  13021. +- }
  13022. ++ ret = ath10k_monitor_recalc(ar);
  13023. ++ if (ret)
  13024. ++ ath10k_warn(ar, "failed to recalc montior: %d\n", ret);
  13025. +
  13026. + mutex_unlock(&ar->conf_mutex);
  13027. + }
  13028. +@@ -2868,7 +3525,7 @@ static void ath10k_bss_info_changed(stru
  13029. + struct ath10k *ar = hw->priv;
  13030. + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
  13031. + int ret = 0;
  13032. +- u32 vdev_param, pdev_param;
  13033. ++ u32 vdev_param, pdev_param, slottime, preamble;
  13034. +
  13035. + mutex_lock(&ar->conf_mutex);
  13036. +
  13037. +@@ -2880,17 +3537,17 @@ static void ath10k_bss_info_changed(stru
  13038. + vdev_param = ar->wmi.vdev_param->beacon_interval;
  13039. + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
  13040. + arvif->beacon_interval);
  13041. +- ath10k_dbg(ATH10K_DBG_MAC,
  13042. ++ ath10k_dbg(ar, ATH10K_DBG_MAC,
  13043. + "mac vdev %d beacon_interval %d\n",
  13044. + arvif->vdev_id, arvif->beacon_interval);
  13045. +
  13046. + if (ret)
  13047. +- ath10k_warn("failed to set beacon interval for vdev %d: %i\n",
  13048. ++ ath10k_warn(ar, "failed to set beacon interval for vdev %d: %i\n",
  13049. + arvif->vdev_id, ret);
  13050. + }
  13051. +
  13052. + if (changed & BSS_CHANGED_BEACON) {
  13053. +- ath10k_dbg(ATH10K_DBG_MAC,
  13054. ++ ath10k_dbg(ar, ATH10K_DBG_MAC,
  13055. + "vdev %d set beacon tx mode to staggered\n",
  13056. + arvif->vdev_id);
  13057. +
  13058. +@@ -2898,14 +3555,26 @@ static void ath10k_bss_info_changed(stru
  13059. + ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
  13060. + WMI_BEACON_STAGGERED_MODE);
  13061. + if (ret)
  13062. +- ath10k_warn("failed to set beacon mode for vdev %d: %i\n",
  13063. ++ ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n",
  13064. ++ arvif->vdev_id, ret);
  13065. ++
  13066. ++ ret = ath10k_mac_setup_bcn_tmpl(arvif);
  13067. ++ if (ret)
  13068. ++ ath10k_warn(ar, "failed to update beacon template: %d\n",
  13069. ++ ret);
  13070. ++ }
  13071. ++
  13072. ++ if (changed & BSS_CHANGED_AP_PROBE_RESP) {
  13073. ++ ret = ath10k_mac_setup_prb_tmpl(arvif);
  13074. ++ if (ret)
  13075. ++ ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n",
  13076. + arvif->vdev_id, ret);
  13077. + }
  13078. +
  13079. +- if (changed & BSS_CHANGED_BEACON_INFO) {
  13080. ++ if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
  13081. + arvif->dtim_period = info->dtim_period;
  13082. +
  13083. +- ath10k_dbg(ATH10K_DBG_MAC,
  13084. ++ ath10k_dbg(ar, ATH10K_DBG_MAC,
  13085. + "mac vdev %d dtim_period %d\n",
  13086. + arvif->vdev_id, arvif->dtim_period);
  13087. +
  13088. +@@ -2913,7 +3582,7 @@ static void ath10k_bss_info_changed(stru
  13089. + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
  13090. + arvif->dtim_period);
  13091. + if (ret)
  13092. +- ath10k_warn("failed to set dtim period for vdev %d: %i\n",
  13093. ++ ath10k_warn(ar, "failed to set dtim period for vdev %d: %i\n",
  13094. + arvif->vdev_id, ret);
  13095. + }
  13096. +
  13097. +@@ -2925,91 +3594,48 @@ static void ath10k_bss_info_changed(stru
  13098. + arvif->u.ap.hidden_ssid = info->hidden_ssid;
  13099. + }
  13100. +
  13101. +- if (changed & BSS_CHANGED_BSSID) {
  13102. +- if (!is_zero_ether_addr(info->bssid)) {
  13103. +- ath10k_dbg(ATH10K_DBG_MAC,
  13104. +- "mac vdev %d create peer %pM\n",
  13105. +- arvif->vdev_id, info->bssid);
  13106. +-
  13107. +- ret = ath10k_peer_create(ar, arvif->vdev_id,
  13108. +- info->bssid);
  13109. +- if (ret)
  13110. +- ath10k_warn("failed to add peer %pM for vdev %d when changing bssid: %i\n",
  13111. +- info->bssid, arvif->vdev_id, ret);
  13112. +-
  13113. +- if (vif->type == NL80211_IFTYPE_STATION) {
  13114. +- /*
  13115. +- * this is never erased as we it for crypto key
  13116. +- * clearing; this is FW requirement
  13117. +- */
  13118. +- memcpy(arvif->bssid, info->bssid, ETH_ALEN);
  13119. +-
  13120. +- ath10k_dbg(ATH10K_DBG_MAC,
  13121. +- "mac vdev %d start %pM\n",
  13122. +- arvif->vdev_id, info->bssid);
  13123. +-
  13124. +- ret = ath10k_vdev_start(arvif);
  13125. +- if (ret) {
  13126. +- ath10k_warn("failed to start vdev %i: %d\n",
  13127. +- arvif->vdev_id, ret);
  13128. +- goto exit;
  13129. +- }
  13130. +-
  13131. +- arvif->is_started = true;
  13132. +- }
  13133. +-
  13134. +- /*
  13135. +- * Mac80211 does not keep IBSS bssid when leaving IBSS,
  13136. +- * so driver need to store it. It is needed when leaving
  13137. +- * IBSS in order to remove BSSID peer.
  13138. +- */
  13139. +- if (vif->type == NL80211_IFTYPE_ADHOC)
  13140. +- memcpy(arvif->bssid, info->bssid,
  13141. +- ETH_ALEN);
  13142. +- }
  13143. +- }
  13144. ++ if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
  13145. ++ ether_addr_copy(arvif->bssid, info->bssid);
  13146. +
  13147. + if (changed & BSS_CHANGED_BEACON_ENABLED)
  13148. + ath10k_control_beaconing(arvif, info);
  13149. +
  13150. + if (changed & BSS_CHANGED_ERP_CTS_PROT) {
  13151. + arvif->use_cts_prot = info->use_cts_prot;
  13152. +- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
  13153. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
  13154. + arvif->vdev_id, info->use_cts_prot);
  13155. +
  13156. + ret = ath10k_recalc_rtscts_prot(arvif);
  13157. + if (ret)
  13158. +- ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n",
  13159. ++ ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
  13160. + arvif->vdev_id, ret);
  13161. + }
  13162. +
  13163. + if (changed & BSS_CHANGED_ERP_SLOT) {
  13164. +- u32 slottime;
  13165. + if (info->use_short_slot)
  13166. + slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */
  13167. +
  13168. + else
  13169. + slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
  13170. +
  13171. +- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
  13172. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
  13173. + arvif->vdev_id, slottime);
  13174. +
  13175. + vdev_param = ar->wmi.vdev_param->slot_time;
  13176. + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
  13177. + slottime);
  13178. + if (ret)
  13179. +- ath10k_warn("failed to set erp slot for vdev %d: %i\n",
  13180. ++ ath10k_warn(ar, "failed to set erp slot for vdev %d: %i\n",
  13181. + arvif->vdev_id, ret);
  13182. + }
  13183. +
  13184. + if (changed & BSS_CHANGED_ERP_PREAMBLE) {
  13185. +- u32 preamble;
  13186. + if (info->use_short_preamble)
  13187. + preamble = WMI_VDEV_PREAMBLE_SHORT;
  13188. + else
  13189. + preamble = WMI_VDEV_PREAMBLE_LONG;
  13190. +
  13191. +- ath10k_dbg(ATH10K_DBG_MAC,
  13192. ++ ath10k_dbg(ar, ATH10K_DBG_MAC,
  13193. + "mac vdev %d preamble %dn",
  13194. + arvif->vdev_id, preamble);
  13195. +
  13196. +@@ -3017,16 +3643,44 @@ static void ath10k_bss_info_changed(stru
  13197. + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
  13198. + preamble);
  13199. + if (ret)
  13200. +- ath10k_warn("failed to set preamble for vdev %d: %i\n",
  13201. ++ ath10k_warn(ar, "failed to set preamble for vdev %d: %i\n",
  13202. + arvif->vdev_id, ret);
  13203. + }
  13204. +
  13205. + if (changed & BSS_CHANGED_ASSOC) {
  13206. +- if (info->assoc)
  13207. ++ if (info->assoc) {
  13208. ++ /* Workaround: Make sure monitor vdev is not running
  13209. ++ * when associating to prevent some firmware revisions
  13210. ++ * (e.g. 10.1 and 10.2) from crashing.
  13211. ++ */
  13212. ++ if (ar->monitor_started)
  13213. ++ ath10k_monitor_stop(ar);
  13214. + ath10k_bss_assoc(hw, vif, info);
  13215. ++ ath10k_monitor_recalc(ar);
  13216. ++ } else {
  13217. ++ ath10k_bss_disassoc(hw, vif);
  13218. ++ }
  13219. ++ }
  13220. ++
  13221. ++ if (changed & BSS_CHANGED_TXPOWER) {
  13222. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n",
  13223. ++ arvif->vdev_id, info->txpower);
  13224. ++
  13225. ++ arvif->txpower = info->txpower;
  13226. ++ ret = ath10k_mac_txpower_recalc(ar);
  13227. ++ if (ret)
  13228. ++ ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
  13229. ++ }
  13230. ++
  13231. ++ if (changed & BSS_CHANGED_PS) {
  13232. ++ arvif->ps = vif->bss_conf.ps;
  13233. ++
  13234. ++ ret = ath10k_config_ps(ar);
  13235. ++ if (ret)
  13236. ++ ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n",
  13237. ++ arvif->vdev_id, ret);
  13238. + }
  13239. +
  13240. +-exit:
  13241. + mutex_unlock(&ar->conf_mutex);
  13242. + }
  13243. +
  13244. +@@ -3043,20 +3697,26 @@ static int ath10k_hw_scan(struct ieee802
  13245. + mutex_lock(&ar->conf_mutex);
  13246. +
  13247. + spin_lock_bh(&ar->data_lock);
  13248. +- if (ar->scan.in_progress) {
  13249. +- spin_unlock_bh(&ar->data_lock);
  13250. ++ switch (ar->scan.state) {
  13251. ++ case ATH10K_SCAN_IDLE:
  13252. ++ reinit_completion(&ar->scan.started);
  13253. ++ reinit_completion(&ar->scan.completed);
  13254. ++ ar->scan.state = ATH10K_SCAN_STARTING;
  13255. ++ ar->scan.is_roc = false;
  13256. ++ ar->scan.vdev_id = arvif->vdev_id;
  13257. ++ ret = 0;
  13258. ++ break;
  13259. ++ case ATH10K_SCAN_STARTING:
  13260. ++ case ATH10K_SCAN_RUNNING:
  13261. ++ case ATH10K_SCAN_ABORTING:
  13262. + ret = -EBUSY;
  13263. +- goto exit;
  13264. ++ break;
  13265. + }
  13266. +-
  13267. +- reinit_completion(&ar->scan.started);
  13268. +- reinit_completion(&ar->scan.completed);
  13269. +- ar->scan.in_progress = true;
  13270. +- ar->scan.aborting = false;
  13271. +- ar->scan.is_roc = false;
  13272. +- ar->scan.vdev_id = arvif->vdev_id;
  13273. + spin_unlock_bh(&ar->data_lock);
  13274. +
  13275. ++ if (ret)
  13276. ++ goto exit;
  13277. ++
  13278. + memset(&arg, 0, sizeof(arg));
  13279. + ath10k_wmi_start_scan_init(ar, &arg);
  13280. + arg.vdev_id = arvif->vdev_id;
  13281. +@@ -3088,9 +3748,9 @@ static int ath10k_hw_scan(struct ieee802
  13282. +
  13283. + ret = ath10k_start_scan(ar, &arg);
  13284. + if (ret) {
  13285. +- ath10k_warn("failed to start hw scan: %d\n", ret);
  13286. ++ ath10k_warn(ar, "failed to start hw scan: %d\n", ret);
  13287. + spin_lock_bh(&ar->data_lock);
  13288. +- ar->scan.in_progress = false;
  13289. ++ ar->scan.state = ATH10K_SCAN_IDLE;
  13290. + spin_unlock_bh(&ar->data_lock);
  13291. + }
  13292. +
  13293. +@@ -3103,15 +3763,12 @@ static void ath10k_cancel_hw_scan(struct
  13294. + struct ieee80211_vif *vif)
  13295. + {
  13296. + struct ath10k *ar = hw->priv;
  13297. +- int ret;
  13298. +
  13299. + mutex_lock(&ar->conf_mutex);
  13300. +- ret = ath10k_abort_scan(ar);
  13301. +- if (ret) {
  13302. +- ath10k_warn("failed to abort scan: %d\n", ret);
  13303. +- ieee80211_scan_completed(hw, 1 /* aborted */);
  13304. +- }
  13305. ++ ath10k_scan_abort(ar);
  13306. + mutex_unlock(&ar->conf_mutex);
  13307. ++
  13308. ++ cancel_delayed_work_sync(&ar->scan.timeout);
  13309. + }
  13310. +
  13311. + static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
  13312. +@@ -3148,7 +3805,7 @@ static void ath10k_set_key_h_def_keyidx(
  13313. + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
  13314. + key->keyidx);
  13315. + if (ret)
  13316. +- ath10k_warn("failed to set vdev %i group key as default key: %d\n",
  13317. ++ ath10k_warn(ar, "failed to set vdev %i group key as default key: %d\n",
  13318. + arvif->vdev_id, ret);
  13319. + }
  13320. +
  13321. +@@ -3162,6 +3819,7 @@ static int ath10k_set_key(struct ieee802
  13322. + const u8 *peer_addr;
  13323. + bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
  13324. + key->cipher == WLAN_CIPHER_SUITE_WEP104;
  13325. ++ bool def_idx = false;
  13326. + int ret = 0;
  13327. +
  13328. + if (key->keyidx > WMI_MAX_KEY_INDEX)
  13329. +@@ -3186,7 +3844,7 @@ static int ath10k_set_key(struct ieee802
  13330. +
  13331. + if (!peer) {
  13332. + if (cmd == SET_KEY) {
  13333. +- ath10k_warn("failed to install key for non-existent peer %pM\n",
  13334. ++ ath10k_warn(ar, "failed to install key for non-existent peer %pM\n",
  13335. + peer_addr);
  13336. + ret = -EOPNOTSUPP;
  13337. + goto exit;
  13338. +@@ -3207,9 +3865,16 @@ static int ath10k_set_key(struct ieee802
  13339. + ath10k_clear_vdev_key(arvif, key);
  13340. + }
  13341. +
  13342. +- ret = ath10k_install_key(arvif, key, cmd, peer_addr);
  13343. ++ /* set TX_USAGE flag for all the keys incase of dot1x-WEP. For
  13344. ++ * static WEP, do not set this flag for the keys whose key id
  13345. ++ * is greater than default key id.
  13346. ++ */
  13347. ++ if (arvif->def_wep_key_idx == -1)
  13348. ++ def_idx = true;
  13349. ++
  13350. ++ ret = ath10k_install_key(arvif, key, cmd, peer_addr, def_idx);
  13351. + if (ret) {
  13352. +- ath10k_warn("failed to install key for vdev %i peer %pM: %d\n",
  13353. ++ ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
  13354. + arvif->vdev_id, peer_addr, ret);
  13355. + goto exit;
  13356. + }
  13357. +@@ -3224,7 +3889,7 @@ static int ath10k_set_key(struct ieee802
  13358. + peer->keys[key->keyidx] = NULL;
  13359. + else if (peer == NULL)
  13360. + /* impossible unless FW goes crazy */
  13361. +- ath10k_warn("Peer %pM disappeared!\n", peer_addr);
  13362. ++ ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr);
  13363. + spin_unlock_bh(&ar->data_lock);
  13364. +
  13365. + exit:
  13366. +@@ -3232,6 +3897,39 @@ exit:
  13367. + return ret;
  13368. + }
  13369. +
  13370. ++static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw,
  13371. ++ struct ieee80211_vif *vif,
  13372. ++ int keyidx)
  13373. ++{
  13374. ++ struct ath10k *ar = hw->priv;
  13375. ++ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
  13376. ++ int ret;
  13377. ++
  13378. ++ mutex_lock(&arvif->ar->conf_mutex);
  13379. ++
  13380. ++ if (arvif->ar->state != ATH10K_STATE_ON)
  13381. ++ goto unlock;
  13382. ++
  13383. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
  13384. ++ arvif->vdev_id, keyidx);
  13385. ++
  13386. ++ ret = ath10k_wmi_vdev_set_param(arvif->ar,
  13387. ++ arvif->vdev_id,
  13388. ++ arvif->ar->wmi.vdev_param->def_keyid,
  13389. ++ keyidx);
  13390. ++
  13391. ++ if (ret) {
  13392. ++ ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n",
  13393. ++ arvif->vdev_id,
  13394. ++ ret);
  13395. ++ goto unlock;
  13396. ++ }
  13397. ++
  13398. ++ arvif->def_wep_key_idx = keyidx;
  13399. ++unlock:
  13400. ++ mutex_unlock(&arvif->ar->conf_mutex);
  13401. ++}
  13402. ++
  13403. + static void ath10k_sta_rc_update_wk(struct work_struct *wk)
  13404. + {
  13405. + struct ath10k *ar;
  13406. +@@ -3260,51 +3958,83 @@ static void ath10k_sta_rc_update_wk(stru
  13407. + mutex_lock(&ar->conf_mutex);
  13408. +
  13409. + if (changed & IEEE80211_RC_BW_CHANGED) {
  13410. +- ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n",
  13411. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n",
  13412. + sta->addr, bw);
  13413. +
  13414. + err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
  13415. + WMI_PEER_CHAN_WIDTH, bw);
  13416. + if (err)
  13417. +- ath10k_warn("failed to update STA %pM peer bw %d: %d\n",
  13418. ++ ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n",
  13419. + sta->addr, bw, err);
  13420. + }
  13421. +
  13422. + if (changed & IEEE80211_RC_NSS_CHANGED) {
  13423. +- ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM nss %d\n",
  13424. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM nss %d\n",
  13425. + sta->addr, nss);
  13426. +
  13427. + err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
  13428. + WMI_PEER_NSS, nss);
  13429. + if (err)
  13430. +- ath10k_warn("failed to update STA %pM nss %d: %d\n",
  13431. ++ ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n",
  13432. + sta->addr, nss, err);
  13433. + }
  13434. +
  13435. + if (changed & IEEE80211_RC_SMPS_CHANGED) {
  13436. +- ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM smps %d\n",
  13437. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM smps %d\n",
  13438. + sta->addr, smps);
  13439. +
  13440. + err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
  13441. + WMI_PEER_SMPS_STATE, smps);
  13442. + if (err)
  13443. +- ath10k_warn("failed to update STA %pM smps %d: %d\n",
  13444. ++ ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n",
  13445. + sta->addr, smps, err);
  13446. + }
  13447. +
  13448. +- if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
  13449. +- ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
  13450. ++ if (changed & IEEE80211_RC_SUPP_RATES_CHANGED ||
  13451. ++ changed & IEEE80211_RC_NSS_CHANGED) {
  13452. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n",
  13453. + sta->addr);
  13454. +
  13455. +- err = ath10k_station_assoc(ar, arvif, sta, true);
  13456. ++ err = ath10k_station_assoc(ar, arvif->vif, sta, true);
  13457. + if (err)
  13458. +- ath10k_warn("failed to reassociate station: %pM\n",
  13459. ++ ath10k_warn(ar, "failed to reassociate station: %pM\n",
  13460. + sta->addr);
  13461. + }
  13462. +
  13463. + mutex_unlock(&ar->conf_mutex);
  13464. + }
  13465. +
  13466. ++static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif)
  13467. ++{
  13468. ++ struct ath10k *ar = arvif->ar;
  13469. ++
  13470. ++ lockdep_assert_held(&ar->conf_mutex);
  13471. ++
  13472. ++ if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
  13473. ++ arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
  13474. ++ return 0;
  13475. ++
  13476. ++ if (ar->num_stations >= ar->max_num_stations)
  13477. ++ return -ENOBUFS;
  13478. ++
  13479. ++ ar->num_stations++;
  13480. ++
  13481. ++ return 0;
  13482. ++}
  13483. ++
  13484. ++static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif)
  13485. ++{
  13486. ++ struct ath10k *ar = arvif->ar;
  13487. ++
  13488. ++ lockdep_assert_held(&ar->conf_mutex);
  13489. ++
  13490. ++ if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
  13491. ++ arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
  13492. ++ return;
  13493. ++
  13494. ++ ar->num_stations--;
  13495. ++}
  13496. ++
  13497. + static int ath10k_sta_state(struct ieee80211_hw *hw,
  13498. + struct ieee80211_vif *vif,
  13499. + struct ieee80211_sta *sta,
  13500. +@@ -3314,7 +4044,6 @@ static int ath10k_sta_state(struct ieee8
  13501. + struct ath10k *ar = hw->priv;
  13502. + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
  13503. + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
  13504. +- int max_num_peers;
  13505. + int ret = 0;
  13506. +
  13507. + if (old_state == IEEE80211_STA_NOTEXIST &&
  13508. +@@ -3332,46 +4061,72 @@ static int ath10k_sta_state(struct ieee8
  13509. + mutex_lock(&ar->conf_mutex);
  13510. +
  13511. + if (old_state == IEEE80211_STA_NOTEXIST &&
  13512. +- new_state == IEEE80211_STA_NONE &&
  13513. +- vif->type != NL80211_IFTYPE_STATION) {
  13514. ++ new_state == IEEE80211_STA_NONE) {
  13515. + /*
  13516. + * New station addition.
  13517. + */
  13518. +- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
  13519. +- max_num_peers = TARGET_10X_NUM_PEERS_MAX - 1;
  13520. +- else
  13521. +- max_num_peers = TARGET_NUM_PEERS;
  13522. ++ ath10k_dbg(ar, ATH10K_DBG_MAC,
  13523. ++ "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n",
  13524. ++ arvif->vdev_id, sta->addr,
  13525. ++ ar->num_stations + 1, ar->max_num_stations,
  13526. ++ ar->num_peers + 1, ar->max_num_peers);
  13527. +
  13528. +- if (ar->num_peers >= max_num_peers) {
  13529. +- ath10k_warn("number of peers exceeded: peers number %d (max peers %d)\n",
  13530. +- ar->num_peers, max_num_peers);
  13531. +- ret = -ENOBUFS;
  13532. ++ ret = ath10k_mac_inc_num_stations(arvif);
  13533. ++ if (ret) {
  13534. ++ ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n",
  13535. ++ ar->max_num_stations);
  13536. + goto exit;
  13537. + }
  13538. +
  13539. +- ath10k_dbg(ATH10K_DBG_MAC,
  13540. +- "mac vdev %d peer create %pM (new sta) num_peers %d\n",
  13541. +- arvif->vdev_id, sta->addr, ar->num_peers);
  13542. +-
  13543. + ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
  13544. +- if (ret)
  13545. +- ath10k_warn("failed to add peer %pM for vdev %d when adding a new sta: %i\n",
  13546. ++ if (ret) {
  13547. ++ ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
  13548. + sta->addr, arvif->vdev_id, ret);
  13549. ++ ath10k_mac_dec_num_stations(arvif);
  13550. ++ goto exit;
  13551. ++ }
  13552. ++
  13553. ++ if (vif->type == NL80211_IFTYPE_STATION) {
  13554. ++ WARN_ON(arvif->is_started);
  13555. ++
  13556. ++ ret = ath10k_vdev_start(arvif);
  13557. ++ if (ret) {
  13558. ++ ath10k_warn(ar, "failed to start vdev %i: %d\n",
  13559. ++ arvif->vdev_id, ret);
  13560. ++ WARN_ON(ath10k_peer_delete(ar, arvif->vdev_id,
  13561. ++ sta->addr));
  13562. ++ ath10k_mac_dec_num_stations(arvif);
  13563. ++ goto exit;
  13564. ++ }
  13565. ++
  13566. ++ arvif->is_started = true;
  13567. ++ }
  13568. + } else if ((old_state == IEEE80211_STA_NONE &&
  13569. + new_state == IEEE80211_STA_NOTEXIST)) {
  13570. + /*
  13571. + * Existing station deletion.
  13572. + */
  13573. +- ath10k_dbg(ATH10K_DBG_MAC,
  13574. ++ ath10k_dbg(ar, ATH10K_DBG_MAC,
  13575. + "mac vdev %d peer delete %pM (sta gone)\n",
  13576. + arvif->vdev_id, sta->addr);
  13577. ++
  13578. ++ if (vif->type == NL80211_IFTYPE_STATION) {
  13579. ++ WARN_ON(!arvif->is_started);
  13580. ++
  13581. ++ ret = ath10k_vdev_stop(arvif);
  13582. ++ if (ret)
  13583. ++ ath10k_warn(ar, "failed to stop vdev %i: %d\n",
  13584. ++ arvif->vdev_id, ret);
  13585. ++
  13586. ++ arvif->is_started = false;
  13587. ++ }
  13588. ++
  13589. + ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
  13590. + if (ret)
  13591. +- ath10k_warn("failed to delete peer %pM for vdev %d: %i\n",
  13592. ++ ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
  13593. + sta->addr, arvif->vdev_id, ret);
  13594. +
  13595. +- if (vif->type == NL80211_IFTYPE_STATION)
  13596. +- ath10k_bss_disassoc(hw, vif);
  13597. ++ ath10k_mac_dec_num_stations(arvif);
  13598. + } else if (old_state == IEEE80211_STA_AUTH &&
  13599. + new_state == IEEE80211_STA_ASSOC &&
  13600. + (vif->type == NL80211_IFTYPE_AP ||
  13601. +@@ -3379,12 +4134,12 @@ static int ath10k_sta_state(struct ieee8
  13602. + /*
  13603. + * New association.
  13604. + */
  13605. +- ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM associated\n",
  13606. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n",
  13607. + sta->addr);
  13608. +
  13609. +- ret = ath10k_station_assoc(ar, arvif, sta, false);
  13610. ++ ret = ath10k_station_assoc(ar, vif, sta, false);
  13611. + if (ret)
  13612. +- ath10k_warn("failed to associate station %pM for vdev %i: %i\n",
  13613. ++ ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n",
  13614. + sta->addr, arvif->vdev_id, ret);
  13615. + } else if (old_state == IEEE80211_STA_ASSOC &&
  13616. + new_state == IEEE80211_STA_AUTH &&
  13617. +@@ -3393,12 +4148,12 @@ static int ath10k_sta_state(struct ieee8
  13618. + /*
  13619. + * Disassociation.
  13620. + */
  13621. +- ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
  13622. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
  13623. + sta->addr);
  13624. +
  13625. +- ret = ath10k_station_disassoc(ar, arvif, sta);
  13626. ++ ret = ath10k_station_disassoc(ar, vif, sta);
  13627. + if (ret)
  13628. +- ath10k_warn("failed to disassociate station: %pM vdev %i: %i\n",
  13629. ++ ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n",
  13630. + sta->addr, arvif->vdev_id, ret);
  13631. + }
  13632. + exit:
  13633. +@@ -3407,9 +4162,11 @@ exit:
  13634. + }
  13635. +
  13636. + static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
  13637. +- u16 ac, bool enable)
  13638. ++ u16 ac, bool enable)
  13639. + {
  13640. + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
  13641. ++ struct wmi_sta_uapsd_auto_trig_arg arg = {};
  13642. ++ u32 prio = 0, acc = 0;
  13643. + u32 value = 0;
  13644. + int ret = 0;
  13645. +
  13646. +@@ -3422,18 +4179,26 @@ static int ath10k_conf_tx_uapsd(struct a
  13647. + case IEEE80211_AC_VO:
  13648. + value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
  13649. + WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
  13650. ++ prio = 7;
  13651. ++ acc = 3;
  13652. + break;
  13653. + case IEEE80211_AC_VI:
  13654. + value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
  13655. + WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
  13656. ++ prio = 5;
  13657. ++ acc = 2;
  13658. + break;
  13659. + case IEEE80211_AC_BE:
  13660. + value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
  13661. + WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
  13662. ++ prio = 2;
  13663. ++ acc = 1;
  13664. + break;
  13665. + case IEEE80211_AC_BK:
  13666. + value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
  13667. + WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
  13668. ++ prio = 0;
  13669. ++ acc = 0;
  13670. + break;
  13671. + }
  13672. +
  13673. +@@ -3446,7 +4211,7 @@ static int ath10k_conf_tx_uapsd(struct a
  13674. + WMI_STA_PS_PARAM_UAPSD,
  13675. + arvif->u.sta.uapsd);
  13676. + if (ret) {
  13677. +- ath10k_warn("failed to set uapsd params: %d\n", ret);
  13678. ++ ath10k_warn(ar, "failed to set uapsd params: %d\n", ret);
  13679. + goto exit;
  13680. + }
  13681. +
  13682. +@@ -3459,7 +4224,44 @@ static int ath10k_conf_tx_uapsd(struct a
  13683. + WMI_STA_PS_PARAM_RX_WAKE_POLICY,
  13684. + value);
  13685. + if (ret)
  13686. +- ath10k_warn("failed to set rx wake param: %d\n", ret);
  13687. ++ ath10k_warn(ar, "failed to set rx wake param: %d\n", ret);
  13688. ++
  13689. ++ ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
  13690. ++ if (ret) {
  13691. ++ ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
  13692. ++ arvif->vdev_id, ret);
  13693. ++ return ret;
  13694. ++ }
  13695. ++
  13696. ++ ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
  13697. ++ if (ret) {
  13698. ++ ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
  13699. ++ arvif->vdev_id, ret);
  13700. ++ return ret;
  13701. ++ }
  13702. ++
  13703. ++ if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) ||
  13704. ++ test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) {
  13705. ++ /* Only userspace can make an educated decision when to send
  13706. ++ * trigger frame. The following effectively disables u-UAPSD
  13707. ++ * autotrigger in firmware (which is enabled by default
  13708. ++ * provided the autotrigger service is available).
  13709. ++ */
  13710. ++
  13711. ++ arg.wmm_ac = acc;
  13712. ++ arg.user_priority = prio;
  13713. ++ arg.service_interval = 0;
  13714. ++ arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
  13715. ++ arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
  13716. ++
  13717. ++ ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id,
  13718. ++ arvif->bssid, &arg, 1);
  13719. ++ if (ret) {
  13720. ++ ath10k_warn(ar, "failed to set uapsd auto trigger %d\n",
  13721. ++ ret);
  13722. ++ return ret;
  13723. ++ }
  13724. ++ }
  13725. +
  13726. + exit:
  13727. + return ret;
  13728. +@@ -3470,6 +4272,7 @@ static int ath10k_conf_tx(struct ieee802
  13729. + const struct ieee80211_tx_queue_params *params)
  13730. + {
  13731. + struct ath10k *ar = hw->priv;
  13732. ++ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
  13733. + struct wmi_wmm_params_arg *p = NULL;
  13734. + int ret;
  13735. +
  13736. +@@ -3477,16 +4280,16 @@ static int ath10k_conf_tx(struct ieee802
  13737. +
  13738. + switch (ac) {
  13739. + case IEEE80211_AC_VO:
  13740. +- p = &ar->wmm_params.ac_vo;
  13741. ++ p = &arvif->wmm_params.ac_vo;
  13742. + break;
  13743. + case IEEE80211_AC_VI:
  13744. +- p = &ar->wmm_params.ac_vi;
  13745. ++ p = &arvif->wmm_params.ac_vi;
  13746. + break;
  13747. + case IEEE80211_AC_BE:
  13748. +- p = &ar->wmm_params.ac_be;
  13749. ++ p = &arvif->wmm_params.ac_be;
  13750. + break;
  13751. + case IEEE80211_AC_BK:
  13752. +- p = &ar->wmm_params.ac_bk;
  13753. ++ p = &arvif->wmm_params.ac_bk;
  13754. + break;
  13755. + }
  13756. +
  13757. +@@ -3506,16 +4309,28 @@ static int ath10k_conf_tx(struct ieee802
  13758. + */
  13759. + p->txop = params->txop * 32;
  13760. +
  13761. +- /* FIXME: FW accepts wmm params per hw, not per vif */
  13762. +- ret = ath10k_wmi_pdev_set_wmm_params(ar, &ar->wmm_params);
  13763. +- if (ret) {
  13764. +- ath10k_warn("failed to set wmm params: %d\n", ret);
  13765. +- goto exit;
  13766. ++ if (ar->wmi.ops->gen_vdev_wmm_conf) {
  13767. ++ ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id,
  13768. ++ &arvif->wmm_params);
  13769. ++ if (ret) {
  13770. ++ ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n",
  13771. ++ arvif->vdev_id, ret);
  13772. ++ goto exit;
  13773. ++ }
  13774. ++ } else {
  13775. ++ /* This won't work well with multi-interface cases but it's
  13776. ++ * better than nothing.
  13777. ++ */
  13778. ++ ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params);
  13779. ++ if (ret) {
  13780. ++ ath10k_warn(ar, "failed to set wmm params: %d\n", ret);
  13781. ++ goto exit;
  13782. ++ }
  13783. + }
  13784. +
  13785. + ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
  13786. + if (ret)
  13787. +- ath10k_warn("failed to set sta uapsd: %d\n", ret);
  13788. ++ ath10k_warn(ar, "failed to set sta uapsd: %d\n", ret);
  13789. +
  13790. + exit:
  13791. + mutex_unlock(&ar->conf_mutex);
  13792. +@@ -3533,27 +4348,35 @@ static int ath10k_remain_on_channel(stru
  13793. + struct ath10k *ar = hw->priv;
  13794. + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
  13795. + struct wmi_start_scan_arg arg;
  13796. +- int ret;
  13797. ++ int ret = 0;
  13798. +
  13799. + mutex_lock(&ar->conf_mutex);
  13800. +
  13801. + spin_lock_bh(&ar->data_lock);
  13802. +- if (ar->scan.in_progress) {
  13803. +- spin_unlock_bh(&ar->data_lock);
  13804. ++ switch (ar->scan.state) {
  13805. ++ case ATH10K_SCAN_IDLE:
  13806. ++ reinit_completion(&ar->scan.started);
  13807. ++ reinit_completion(&ar->scan.completed);
  13808. ++ reinit_completion(&ar->scan.on_channel);
  13809. ++ ar->scan.state = ATH10K_SCAN_STARTING;
  13810. ++ ar->scan.is_roc = true;
  13811. ++ ar->scan.vdev_id = arvif->vdev_id;
  13812. ++ ar->scan.roc_freq = chan->center_freq;
  13813. ++ ret = 0;
  13814. ++ break;
  13815. ++ case ATH10K_SCAN_STARTING:
  13816. ++ case ATH10K_SCAN_RUNNING:
  13817. ++ case ATH10K_SCAN_ABORTING:
  13818. + ret = -EBUSY;
  13819. +- goto exit;
  13820. ++ break;
  13821. + }
  13822. +-
  13823. +- reinit_completion(&ar->scan.started);
  13824. +- reinit_completion(&ar->scan.completed);
  13825. +- reinit_completion(&ar->scan.on_channel);
  13826. +- ar->scan.in_progress = true;
  13827. +- ar->scan.aborting = false;
  13828. +- ar->scan.is_roc = true;
  13829. +- ar->scan.vdev_id = arvif->vdev_id;
  13830. +- ar->scan.roc_freq = chan->center_freq;
  13831. + spin_unlock_bh(&ar->data_lock);
  13832. +
  13833. ++ if (ret)
  13834. ++ goto exit;
  13835. ++
  13836. ++ duration = max(duration, WMI_SCAN_CHAN_MIN_TIME_MSEC);
  13837. ++
  13838. + memset(&arg, 0, sizeof(arg));
  13839. + ath10k_wmi_start_scan_init(ar, &arg);
  13840. + arg.vdev_id = arvif->vdev_id;
  13841. +@@ -3568,17 +4391,21 @@ static int ath10k_remain_on_channel(stru
  13842. +
  13843. + ret = ath10k_start_scan(ar, &arg);
  13844. + if (ret) {
  13845. +- ath10k_warn("failed to start roc scan: %d\n", ret);
  13846. ++ ath10k_warn(ar, "failed to start roc scan: %d\n", ret);
  13847. + spin_lock_bh(&ar->data_lock);
  13848. +- ar->scan.in_progress = false;
  13849. ++ ar->scan.state = ATH10K_SCAN_IDLE;
  13850. + spin_unlock_bh(&ar->data_lock);
  13851. + goto exit;
  13852. + }
  13853. +
  13854. + ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ);
  13855. + if (ret == 0) {
  13856. +- ath10k_warn("failed to switch to channel for roc scan\n");
  13857. +- ath10k_abort_scan(ar);
  13858. ++ ath10k_warn(ar, "failed to switch to channel for roc scan\n");
  13859. ++
  13860. ++ ret = ath10k_scan_stop(ar);
  13861. ++ if (ret)
  13862. ++ ath10k_warn(ar, "failed to stop scan: %d\n", ret);
  13863. ++
  13864. + ret = -ETIMEDOUT;
  13865. + goto exit;
  13866. + }
  13867. +@@ -3594,9 +4421,11 @@ static int ath10k_cancel_remain_on_chann
  13868. + struct ath10k *ar = hw->priv;
  13869. +
  13870. + mutex_lock(&ar->conf_mutex);
  13871. +- ath10k_abort_scan(ar);
  13872. ++ ath10k_scan_abort(ar);
  13873. + mutex_unlock(&ar->conf_mutex);
  13874. +
  13875. ++ cancel_delayed_work_sync(&ar->scan.timeout);
  13876. ++
  13877. + return 0;
  13878. + }
  13879. +
  13880. +@@ -3613,35 +4442,12 @@ static int ath10k_set_rts_threshold(stru
  13881. +
  13882. + mutex_lock(&ar->conf_mutex);
  13883. + list_for_each_entry(arvif, &ar->arvifs, list) {
  13884. +- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
  13885. +- arvif->vdev_id, value);
  13886. +-
  13887. +- ret = ath10k_mac_set_rts(arvif, value);
  13888. +- if (ret) {
  13889. +- ath10k_warn("failed to set rts threshold for vdev %d: %d\n",
  13890. +- arvif->vdev_id, ret);
  13891. +- break;
  13892. +- }
  13893. +- }
  13894. +- mutex_unlock(&ar->conf_mutex);
  13895. +-
  13896. +- return ret;
  13897. +-}
  13898. +-
  13899. +-static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
  13900. +-{
  13901. +- struct ath10k *ar = hw->priv;
  13902. +- struct ath10k_vif *arvif;
  13903. +- int ret = 0;
  13904. +-
  13905. +- mutex_lock(&ar->conf_mutex);
  13906. +- list_for_each_entry(arvif, &ar->arvifs, list) {
  13907. +- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d fragmentation threshold %d\n",
  13908. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
  13909. + arvif->vdev_id, value);
  13910. +
  13911. + ret = ath10k_mac_set_rts(arvif, value);
  13912. + if (ret) {
  13913. +- ath10k_warn("failed to set fragmentation threshold for vdev %d: %d\n",
  13914. ++ ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
  13915. + arvif->vdev_id, ret);
  13916. + break;
  13917. + }
  13918. +@@ -3675,13 +4481,15 @@ static void ath10k_flush(struct ieee8021
  13919. + empty = (ar->htt.num_pending_tx == 0);
  13920. + spin_unlock_bh(&ar->htt.tx_lock);
  13921. +
  13922. +- skip = (ar->state == ATH10K_STATE_WEDGED);
  13923. ++ skip = (ar->state == ATH10K_STATE_WEDGED) ||
  13924. ++ test_bit(ATH10K_FLAG_CRASH_FLUSH,
  13925. ++ &ar->dev_flags);
  13926. +
  13927. + (empty || skip);
  13928. + }), ATH10K_FLUSH_TIMEOUT_HZ);
  13929. +
  13930. + if (ret <= 0 || skip)
  13931. +- ath10k_warn("failed to flush transmit queue (skip %i ar-state %i): %i\n",
  13932. ++ ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %i\n",
  13933. + skip, ar->state, ret);
  13934. +
  13935. + skip:
  13936. +@@ -3716,7 +4524,7 @@ static int ath10k_suspend(struct ieee802
  13937. +
  13938. + ret = ath10k_hif_suspend(ar);
  13939. + if (ret) {
  13940. +- ath10k_warn("failed to suspend hif: %d\n", ret);
  13941. ++ ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
  13942. + goto resume;
  13943. + }
  13944. +
  13945. +@@ -3725,7 +4533,7 @@ static int ath10k_suspend(struct ieee802
  13946. + resume:
  13947. + ret = ath10k_wmi_pdev_resume_target(ar);
  13948. + if (ret)
  13949. +- ath10k_warn("failed to resume target: %d\n", ret);
  13950. ++ ath10k_warn(ar, "failed to resume target: %d\n", ret);
  13951. +
  13952. + ret = 1;
  13953. + exit:
  13954. +@@ -3742,14 +4550,14 @@ static int ath10k_resume(struct ieee8021
  13955. +
  13956. + ret = ath10k_hif_resume(ar);
  13957. + if (ret) {
  13958. +- ath10k_warn("failed to resume hif: %d\n", ret);
  13959. ++ ath10k_warn(ar, "failed to resume hif: %d\n", ret);
  13960. + ret = 1;
  13961. + goto exit;
  13962. + }
  13963. +
  13964. + ret = ath10k_wmi_pdev_resume_target(ar);
  13965. + if (ret) {
  13966. +- ath10k_warn("failed to resume target: %d\n", ret);
  13967. ++ ath10k_warn(ar, "failed to resume target: %d\n", ret);
  13968. + ret = 1;
  13969. + goto exit;
  13970. + }
  13971. +@@ -3770,8 +4578,9 @@ static void ath10k_restart_complete(stru
  13972. + /* If device failed to restart it will be in a different state, e.g.
  13973. + * ATH10K_STATE_WEDGED */
  13974. + if (ar->state == ATH10K_STATE_RESTARTED) {
  13975. +- ath10k_info("device successfully recovered\n");
  13976. ++ ath10k_info(ar, "device successfully recovered\n");
  13977. + ar->state = ATH10K_STATE_ON;
  13978. ++ ieee80211_wake_queues(ar->hw);
  13979. + }
  13980. +
  13981. + mutex_unlock(&ar->conf_mutex);
  13982. +@@ -3807,6 +4616,9 @@ static int ath10k_get_survey(struct ieee
  13983. +
  13984. + survey->channel = &sband->channels[idx];
  13985. +
  13986. ++ if (ar->rx_channel == survey->channel)
  13987. ++ survey->filled |= SURVEY_INFO_IN_USE;
  13988. ++
  13989. + exit:
  13990. + mutex_unlock(&ar->conf_mutex);
  13991. + return ret;
  13992. +@@ -3854,6 +4666,10 @@ ath10k_default_bitrate_mask(struct ath10
  13993. + u32 legacy = 0x00ff;
  13994. + u8 ht = 0xff, i;
  13995. + u16 vht = 0x3ff;
  13996. ++ u16 nrf = ar->num_rf_chains;
  13997. ++
  13998. ++ if (ar->cfg_tx_chainmask)
  13999. ++ nrf = get_nss_from_chainmask(ar->cfg_tx_chainmask);
  14000. +
  14001. + switch (band) {
  14002. + case IEEE80211_BAND_2GHZ:
  14003. +@@ -3869,11 +4685,11 @@ ath10k_default_bitrate_mask(struct ath10
  14004. + if (mask->control[band].legacy != legacy)
  14005. + return false;
  14006. +
  14007. +- for (i = 0; i < ar->num_rf_chains; i++)
  14008. ++ for (i = 0; i < nrf; i++)
  14009. + if (mask->control[band].ht_mcs[i] != ht)
  14010. + return false;
  14011. +
  14012. +- for (i = 0; i < ar->num_rf_chains; i++)
  14013. ++ for (i = 0; i < nrf; i++)
  14014. + if (mask->control[band].vht_mcs[i] != vht)
  14015. + return false;
  14016. +
  14017. +@@ -3897,8 +4713,8 @@ ath10k_bitrate_mask_nss(const struct cfg
  14018. + continue;
  14019. + else if (mask->control[band].ht_mcs[i] == 0x00)
  14020. + break;
  14021. +- else
  14022. +- return false;
  14023. ++
  14024. ++ return false;
  14025. + }
  14026. +
  14027. + ht_nss = i;
  14028. +@@ -3909,8 +4725,8 @@ ath10k_bitrate_mask_nss(const struct cfg
  14029. + continue;
  14030. + else if (mask->control[band].vht_mcs[i] == 0x0000)
  14031. + break;
  14032. +- else
  14033. +- return false;
  14034. ++
  14035. ++ return false;
  14036. + }
  14037. +
  14038. + vht_nss = i;
  14039. +@@ -3967,7 +4783,8 @@ ath10k_bitrate_mask_correct(const struct
  14040. + }
  14041. +
  14042. + static bool
  14043. +-ath10k_bitrate_mask_rate(const struct cfg80211_bitrate_mask *mask,
  14044. ++ath10k_bitrate_mask_rate(struct ath10k *ar,
  14045. ++ const struct cfg80211_bitrate_mask *mask,
  14046. + enum ieee80211_band band,
  14047. + u8 *fixed_rate,
  14048. + u8 *fixed_nss)
  14049. +@@ -4025,7 +4842,7 @@ ath10k_bitrate_mask_rate(const struct cf
  14050. + nss <<= 4;
  14051. + pream <<= 6;
  14052. +
  14053. +- ath10k_dbg(ATH10K_DBG_MAC, "mac fixed rate pream 0x%02x nss 0x%02x rate 0x%02x\n",
  14054. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac fixed rate pream 0x%02x nss 0x%02x rate 0x%02x\n",
  14055. + pream, nss, rate);
  14056. +
  14057. + *fixed_rate = pream | nss | rate;
  14058. +@@ -4033,7 +4850,8 @@ ath10k_bitrate_mask_rate(const struct cf
  14059. + return true;
  14060. + }
  14061. +
  14062. +-static bool ath10k_get_fixed_rate_nss(const struct cfg80211_bitrate_mask *mask,
  14063. ++static bool ath10k_get_fixed_rate_nss(struct ath10k *ar,
  14064. ++ const struct cfg80211_bitrate_mask *mask,
  14065. + enum ieee80211_band band,
  14066. + u8 *fixed_rate,
  14067. + u8 *fixed_nss)
  14068. +@@ -4043,7 +4861,7 @@ static bool ath10k_get_fixed_rate_nss(co
  14069. + return true;
  14070. +
  14071. + /* Next Check single rate is set */
  14072. +- return ath10k_bitrate_mask_rate(mask, band, fixed_rate, fixed_nss);
  14073. ++ return ath10k_bitrate_mask_rate(ar, mask, band, fixed_rate, fixed_nss);
  14074. + }
  14075. +
  14076. + static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
  14077. +@@ -4063,16 +4881,16 @@ static int ath10k_set_fixed_rate_param(s
  14078. + goto exit;
  14079. +
  14080. + if (fixed_rate == WMI_FIXED_RATE_NONE)
  14081. +- ath10k_dbg(ATH10K_DBG_MAC, "mac disable fixed bitrate mask\n");
  14082. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac disable fixed bitrate mask\n");
  14083. +
  14084. + if (force_sgi)
  14085. +- ath10k_dbg(ATH10K_DBG_MAC, "mac force sgi\n");
  14086. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac force sgi\n");
  14087. +
  14088. + vdev_param = ar->wmi.vdev_param->fixed_rate;
  14089. + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
  14090. + vdev_param, fixed_rate);
  14091. + if (ret) {
  14092. +- ath10k_warn("failed to set fixed rate param 0x%02x: %d\n",
  14093. ++ ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n",
  14094. + fixed_rate, ret);
  14095. + ret = -EINVAL;
  14096. + goto exit;
  14097. +@@ -4085,7 +4903,7 @@ static int ath10k_set_fixed_rate_param(s
  14098. + vdev_param, fixed_nss);
  14099. +
  14100. + if (ret) {
  14101. +- ath10k_warn("failed to set fixed nss param %d: %d\n",
  14102. ++ ath10k_warn(ar, "failed to set fixed nss param %d: %d\n",
  14103. + fixed_nss, ret);
  14104. + ret = -EINVAL;
  14105. + goto exit;
  14106. +@@ -4098,7 +4916,7 @@ static int ath10k_set_fixed_rate_param(s
  14107. + force_sgi);
  14108. +
  14109. + if (ret) {
  14110. +- ath10k_warn("failed to set sgi param %d: %d\n",
  14111. ++ ath10k_warn(ar, "failed to set sgi param %d: %d\n",
  14112. + force_sgi, ret);
  14113. + ret = -EINVAL;
  14114. + goto exit;
  14115. +@@ -4122,19 +4940,22 @@ static int ath10k_set_bitrate_mask(struc
  14116. + u8 fixed_nss = ar->num_rf_chains;
  14117. + u8 force_sgi;
  14118. +
  14119. ++ if (ar->cfg_tx_chainmask)
  14120. ++ fixed_nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
  14121. ++
  14122. + force_sgi = mask->control[band].gi;
  14123. + if (force_sgi == NL80211_TXRATE_FORCE_LGI)
  14124. + return -EINVAL;
  14125. +
  14126. + if (!ath10k_default_bitrate_mask(ar, band, mask)) {
  14127. +- if (!ath10k_get_fixed_rate_nss(mask, band,
  14128. ++ if (!ath10k_get_fixed_rate_nss(ar, mask, band,
  14129. + &fixed_rate,
  14130. + &fixed_nss))
  14131. + return -EINVAL;
  14132. + }
  14133. +
  14134. + if (fixed_rate == WMI_FIXED_RATE_NONE && force_sgi) {
  14135. +- ath10k_warn("failed to force SGI usage for default rate settings\n");
  14136. ++ ath10k_warn(ar, "failed to force SGI usage for default rate settings\n");
  14137. + return -EINVAL;
  14138. + }
  14139. +
  14140. +@@ -4153,7 +4974,7 @@ static void ath10k_sta_rc_update(struct
  14141. +
  14142. + spin_lock_bh(&ar->data_lock);
  14143. +
  14144. +- ath10k_dbg(ATH10K_DBG_MAC,
  14145. ++ ath10k_dbg(ar, ATH10K_DBG_MAC,
  14146. + "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
  14147. + sta->addr, changed, sta->bandwidth, sta->rx_nss,
  14148. + sta->smps_mode);
  14149. +@@ -4172,7 +4993,7 @@ static void ath10k_sta_rc_update(struct
  14150. + bw = WMI_PEER_CHWIDTH_80MHZ;
  14151. + break;
  14152. + case IEEE80211_STA_RX_BW_160:
  14153. +- ath10k_warn("Invalid bandwith %d in rc update for %pM\n",
  14154. ++ ath10k_warn(ar, "Invalid bandwith %d in rc update for %pM\n",
  14155. + sta->bandwidth, sta->addr);
  14156. + bw = WMI_PEER_CHWIDTH_20MHZ;
  14157. + break;
  14158. +@@ -4199,7 +5020,7 @@ static void ath10k_sta_rc_update(struct
  14159. + smps = WMI_PEER_SMPS_DYNAMIC;
  14160. + break;
  14161. + case IEEE80211_SMPS_NUM_MODES:
  14162. +- ath10k_warn("Invalid smps %d in sta rc update for %pM\n",
  14163. ++ ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n",
  14164. + sta->smps_mode, sta->addr);
  14165. + smps = WMI_PEER_SMPS_PS_NONE;
  14166. + break;
  14167. +@@ -4225,6 +5046,39 @@ static u64 ath10k_get_tsf(struct ieee802
  14168. + return 0;
  14169. + }
  14170. +
  14171. ++static int ath10k_ampdu_action(struct ieee80211_hw *hw,
  14172. ++ struct ieee80211_vif *vif,
  14173. ++ enum ieee80211_ampdu_mlme_action action,
  14174. ++ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
  14175. ++ u8 buf_size)
  14176. ++{
  14177. ++ struct ath10k *ar = hw->priv;
  14178. ++ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
  14179. ++
  14180. ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n",
  14181. ++ arvif->vdev_id, sta->addr, tid, action);
  14182. ++
  14183. ++ switch (action) {
  14184. ++ case IEEE80211_AMPDU_RX_START:
  14185. ++ case IEEE80211_AMPDU_RX_STOP:
  14186. ++ /* HTT AddBa/DelBa events trigger mac80211 Rx BA session
  14187. ++ * creation/removal. Do we need to verify this?
  14188. ++ */
  14189. ++ return 0;
  14190. ++ case IEEE80211_AMPDU_TX_START:
  14191. ++ case IEEE80211_AMPDU_TX_STOP_CONT:
  14192. ++ case IEEE80211_AMPDU_TX_STOP_FLUSH:
  14193. ++ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
  14194. ++ case IEEE80211_AMPDU_TX_OPERATIONAL:
  14195. ++ /* Firmware offloads Tx aggregation entirely so deny mac80211
  14196. ++ * Tx aggregation requests.
  14197. ++ */
  14198. ++ return -EOPNOTSUPP;
  14199. ++ }
  14200. ++
  14201. ++ return -EINVAL;
  14202. ++}
  14203. ++
  14204. + static const struct ieee80211_ops ath10k_ops = {
  14205. + .tx = ath10k_tx,
  14206. + .start = ath10k_start,
  14207. +@@ -4237,23 +5091,35 @@ static const struct ieee80211_ops ath10k
  14208. + .hw_scan = ath10k_hw_scan,
  14209. + .cancel_hw_scan = ath10k_cancel_hw_scan,
  14210. + .set_key = ath10k_set_key,
  14211. ++ .set_default_unicast_key = ath10k_set_default_unicast_key,
  14212. + .sta_state = ath10k_sta_state,
  14213. + .conf_tx = ath10k_conf_tx,
  14214. + .remain_on_channel = ath10k_remain_on_channel,
  14215. + .cancel_remain_on_channel = ath10k_cancel_remain_on_channel,
  14216. + .set_rts_threshold = ath10k_set_rts_threshold,
  14217. +- .set_frag_threshold = ath10k_set_frag_threshold,
  14218. + .flush = ath10k_flush,
  14219. + .tx_last_beacon = ath10k_tx_last_beacon,
  14220. ++ .set_antenna = ath10k_set_antenna,
  14221. ++ .get_antenna = ath10k_get_antenna,
  14222. + .restart_complete = ath10k_restart_complete,
  14223. + .get_survey = ath10k_get_survey,
  14224. + .set_bitrate_mask = ath10k_set_bitrate_mask,
  14225. + .sta_rc_update = ath10k_sta_rc_update,
  14226. + .get_tsf = ath10k_get_tsf,
  14227. ++ .ampdu_action = ath10k_ampdu_action,
  14228. ++ .get_et_sset_count = ath10k_debug_get_et_sset_count,
  14229. ++ .get_et_stats = ath10k_debug_get_et_stats,
  14230. ++ .get_et_strings = ath10k_debug_get_et_strings,
  14231. ++
  14232. ++ CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
  14233. ++
  14234. + #ifdef CONFIG_PM
  14235. + .suspend = ath10k_suspend,
  14236. + .resume = ath10k_resume,
  14237. + #endif
  14238. ++#ifdef CPTCFG_MAC80211_DEBUGFS
  14239. ++ .sta_add_debugfs = ath10k_sta_add_debugfs,
  14240. ++#endif
  14241. + };
  14242. +
  14243. + #define RATETAB_ENT(_rate, _rateid, _flags) { \
  14244. +@@ -4324,6 +5190,9 @@ static const struct ieee80211_channel at
  14245. + CHAN5G(165, 5825, 0),
  14246. + };
  14247. +
  14248. ++/* Note: Be careful if you re-order these. There is code which depends on this
  14249. ++ * ordering.
  14250. ++ */
  14251. + static struct ieee80211_rate ath10k_rates[] = {
  14252. + /* CCK */
  14253. + RATETAB_ENT(10, 0x82, 0),
  14254. +@@ -4346,12 +5215,12 @@ static struct ieee80211_rate ath10k_rate
  14255. + #define ath10k_g_rates (ath10k_rates + 0)
  14256. + #define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
  14257. +
  14258. +-struct ath10k *ath10k_mac_create(void)
  14259. ++struct ath10k *ath10k_mac_create(size_t priv_size)
  14260. + {
  14261. + struct ieee80211_hw *hw;
  14262. + struct ath10k *ar;
  14263. +
  14264. +- hw = ieee80211_alloc_hw(sizeof(struct ath10k), &ath10k_ops);
  14265. ++ hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, &ath10k_ops);
  14266. + if (!hw)
  14267. + return NULL;
  14268. +
  14269. +@@ -4377,6 +5246,10 @@ static const struct ieee80211_iface_limi
  14270. + .types = BIT(NL80211_IFTYPE_P2P_GO)
  14271. + },
  14272. + {
  14273. ++ .max = 1,
  14274. ++ .types = BIT(NL80211_IFTYPE_P2P_DEVICE)
  14275. ++ },
  14276. ++ {
  14277. + .max = 7,
  14278. + .types = BIT(NL80211_IFTYPE_AP)
  14279. + },
  14280. +@@ -4501,7 +5374,6 @@ static struct ieee80211_sta_ht_cap ath10
  14281. + return ht_cap;
  14282. + }
  14283. +
  14284. +-
  14285. + static void ath10k_get_arvif_iter(void *data, u8 *mac,
  14286. + struct ieee80211_vif *vif)
  14287. + {
  14288. +@@ -4526,7 +5398,7 @@ struct ath10k_vif *ath10k_get_arvif(stru
  14289. + ath10k_get_arvif_iter,
  14290. + &arvif_iter);
  14291. + if (!arvif_iter.arvif) {
  14292. +- ath10k_warn("No VIF found for vdev %d\n", vdev_id);
  14293. ++ ath10k_warn(ar, "No VIF found for vdev %d\n", vdev_id);
  14294. + return NULL;
  14295. + }
  14296. +
  14297. +@@ -4564,7 +5436,8 @@ int ath10k_mac_register(struct ath10k *a
  14298. + band->bitrates = ath10k_g_rates;
  14299. + band->ht_cap = ht_cap;
  14300. +
  14301. +- /* vht is not supported in 2.4 GHz */
  14302. ++ /* Enable the VHT support at 2.4 GHz */
  14303. ++ band->vht_cap = vht_cap;
  14304. +
  14305. + ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band;
  14306. + }
  14307. +@@ -4590,18 +5463,20 @@ int ath10k_mac_register(struct ath10k *a
  14308. +
  14309. + ar->hw->wiphy->interface_modes =
  14310. + BIT(NL80211_IFTYPE_STATION) |
  14311. +- BIT(NL80211_IFTYPE_ADHOC) |
  14312. + BIT(NL80211_IFTYPE_AP);
  14313. +
  14314. ++ ar->hw->wiphy->available_antennas_rx = ar->supp_rx_chainmask;
  14315. ++ ar->hw->wiphy->available_antennas_tx = ar->supp_tx_chainmask;
  14316. ++
  14317. + if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->fw_features))
  14318. + ar->hw->wiphy->interface_modes |=
  14319. ++ BIT(NL80211_IFTYPE_P2P_DEVICE) |
  14320. + BIT(NL80211_IFTYPE_P2P_CLIENT) |
  14321. + BIT(NL80211_IFTYPE_P2P_GO);
  14322. +
  14323. + ar->hw->flags = IEEE80211_HW_SIGNAL_DBM |
  14324. + IEEE80211_HW_SUPPORTS_PS |
  14325. + IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
  14326. +- IEEE80211_HW_SUPPORTS_UAPSD |
  14327. + IEEE80211_HW_MFP_CAPABLE |
  14328. + IEEE80211_HW_REPORTS_TX_ACK_STATUS |
  14329. + IEEE80211_HW_HAS_RATE_CONTROL |
  14330. +@@ -4609,10 +5484,6 @@ int ath10k_mac_register(struct ath10k *a
  14331. + IEEE80211_HW_AP_LINK_PS |
  14332. + IEEE80211_HW_SPECTRUM_MGMT;
  14333. +
  14334. +- /* MSDU can have HTT TX fragment pushed in front. The additional 4
  14335. +- * bytes is used for padding/alignment if necessary. */
  14336. +- ar->hw->extra_tx_headroom += sizeof(struct htt_data_tx_desc_frag)*2 + 4;
  14337. +-
  14338. + if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
  14339. + ar->hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS;
  14340. +
  14341. +@@ -4629,25 +5500,52 @@ int ath10k_mac_register(struct ath10k *a
  14342. +
  14343. + ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
  14344. +
  14345. ++ if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) {
  14346. ++ ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
  14347. ++
  14348. ++ /* Firmware delivers WPS/P2P Probe Requests frames to driver so
  14349. ++ * that userspace (e.g. wpa_supplicant/hostapd) can generate
  14350. ++ * correct Probe Responses. This is more of a hack advert..
  14351. ++ */
  14352. ++ ar->hw->wiphy->probe_resp_offload |=
  14353. ++ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
  14354. ++ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
  14355. ++ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
  14356. ++ }
  14357. ++
  14358. + ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
  14359. + ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
  14360. + ar->hw->wiphy->max_remain_on_channel_duration = 5000;
  14361. +
  14362. + ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
  14363. ++ ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
  14364. ++
  14365. + /*
  14366. + * on LL hardware queues are managed entirely by the FW
  14367. + * so we only advertise to mac we can do the queues thing
  14368. + */
  14369. + ar->hw->queues = 4;
  14370. +
  14371. +- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
  14372. +- ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
  14373. +- ar->hw->wiphy->n_iface_combinations =
  14374. +- ARRAY_SIZE(ath10k_10x_if_comb);
  14375. +- } else {
  14376. ++ switch (ar->wmi.op_version) {
  14377. ++ case ATH10K_FW_WMI_OP_VERSION_MAIN:
  14378. ++ case ATH10K_FW_WMI_OP_VERSION_TLV:
  14379. + ar->hw->wiphy->iface_combinations = ath10k_if_comb;
  14380. + ar->hw->wiphy->n_iface_combinations =
  14381. + ARRAY_SIZE(ath10k_if_comb);
  14382. ++ ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
  14383. ++ break;
  14384. ++ case ATH10K_FW_WMI_OP_VERSION_10_1:
  14385. ++ case ATH10K_FW_WMI_OP_VERSION_10_2:
  14386. ++ case ATH10K_FW_WMI_OP_VERSION_10_2_4:
  14387. ++ ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
  14388. ++ ar->hw->wiphy->n_iface_combinations =
  14389. ++ ARRAY_SIZE(ath10k_10x_if_comb);
  14390. ++ break;
  14391. ++ case ATH10K_FW_WMI_OP_VERSION_UNSET:
  14392. ++ case ATH10K_FW_WMI_OP_VERSION_MAX:
  14393. ++ WARN_ON(1);
  14394. ++ ret = -EINVAL;
  14395. ++ goto err_free;
  14396. + }
  14397. +
  14398. + ar->hw->netdev_features = NETIF_F_HW_CSUM;
  14399. +@@ -4659,19 +5557,19 @@ int ath10k_mac_register(struct ath10k *a
  14400. + NL80211_DFS_UNSET);
  14401. +
  14402. + if (!ar->dfs_detector)
  14403. +- ath10k_warn("failed to initialise DFS pattern detector\n");
  14404. ++ ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
  14405. + }
  14406. +
  14407. + ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
  14408. + ath10k_reg_notifier);
  14409. + if (ret) {
  14410. +- ath10k_err("failed to initialise regulatory: %i\n", ret);
  14411. ++ ath10k_err(ar, "failed to initialise regulatory: %i\n", ret);
  14412. + goto err_free;
  14413. + }
  14414. +
  14415. + ret = ieee80211_register_hw(ar->hw);
  14416. + if (ret) {
  14417. +- ath10k_err("failed to register ieee80211: %d\n", ret);
  14418. ++ ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
  14419. + goto err_free;
  14420. + }
  14421. +
  14422. +--- a/drivers/net/wireless/ath/ath10k/mac.h
  14423. ++++ b/drivers/net/wireless/ath/ath10k/mac.h
  14424. +@@ -21,33 +21,41 @@
  14425. + #include <net/mac80211.h>
  14426. + #include "core.h"
  14427. +
  14428. ++#define WEP_KEYID_SHIFT 6
  14429. ++
  14430. + struct ath10k_generic_iter {
  14431. + struct ath10k *ar;
  14432. + int ret;
  14433. + };
  14434. +
  14435. +-struct ath10k *ath10k_mac_create(void);
  14436. ++struct ath10k *ath10k_mac_create(size_t priv_size);
  14437. + void ath10k_mac_destroy(struct ath10k *ar);
  14438. + int ath10k_mac_register(struct ath10k *ar);
  14439. + void ath10k_mac_unregister(struct ath10k *ar);
  14440. + struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id);
  14441. +-void ath10k_reset_scan(unsigned long ptr);
  14442. ++void __ath10k_scan_finish(struct ath10k *ar);
  14443. ++void ath10k_scan_finish(struct ath10k *ar);
  14444. ++void ath10k_scan_timeout_work(struct work_struct *work);
  14445. + void ath10k_offchan_tx_purge(struct ath10k *ar);
  14446. + void ath10k_offchan_tx_work(struct work_struct *work);
  14447. + void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar);
  14448. + void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work);
  14449. + void ath10k_halt(struct ath10k *ar);
  14450. ++void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif);
  14451. ++void ath10k_drain_tx(struct ath10k *ar);
  14452. ++bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
  14453. ++ u8 keyidx);
  14454. +
  14455. + static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
  14456. + {
  14457. + return (struct ath10k_vif *)vif->drv_priv;
  14458. + }
  14459. +
  14460. +-static inline void ath10k_tx_h_seq_no(struct sk_buff *skb)
  14461. ++static inline void ath10k_tx_h_seq_no(struct ieee80211_vif *vif,
  14462. ++ struct sk_buff *skb)
  14463. + {
  14464. + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
  14465. + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
  14466. +- struct ieee80211_vif *vif = info->control.vif;
  14467. + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
  14468. +
  14469. + if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
  14470. +--- a/drivers/net/wireless/ath/ath10k/pci.c
  14471. ++++ b/drivers/net/wireless/ath/ath10k/pci.c
  14472. +@@ -44,13 +44,9 @@ enum ath10k_pci_reset_mode {
  14473. + ATH10K_PCI_RESET_WARM_ONLY = 1,
  14474. + };
  14475. +
  14476. +-static unsigned int ath10k_pci_target_ps;
  14477. + static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
  14478. + static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
  14479. +
  14480. +-module_param_named(target_ps, ath10k_pci_target_ps, uint, 0644);
  14481. +-MODULE_PARM_DESC(target_ps, "Enable ath10k Target (SoC) PS option");
  14482. +-
  14483. + module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
  14484. + MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
  14485. +
  14486. +@@ -59,21 +55,31 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1
  14487. +
  14488. + /* how long wait to wait for target to initialise, in ms */
  14489. + #define ATH10K_PCI_TARGET_WAIT 3000
  14490. ++#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
  14491. +
  14492. + #define QCA988X_2_0_DEVICE_ID (0x003c)
  14493. ++#define QCA6174_2_1_DEVICE_ID (0x003e)
  14494. +
  14495. +-static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
  14496. ++static const struct pci_device_id ath10k_pci_id_table[] = {
  14497. + { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
  14498. ++ { PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
  14499. + {0}
  14500. + };
  14501. +
  14502. +-static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
  14503. +- u32 *data);
  14504. ++static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
  14505. ++ /* QCA988X pre 2.0 chips are not supported because they need some nasty
  14506. ++ * hacks. ath10k doesn't have them and these devices crash horribly
  14507. ++ * because of that.
  14508. ++ */
  14509. ++ { QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
  14510. ++ { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
  14511. ++ { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
  14512. ++ { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
  14513. ++ { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
  14514. ++ { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
  14515. ++};
  14516. +
  14517. +-static int ath10k_pci_post_rx(struct ath10k *ar);
  14518. +-static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
  14519. +- int num);
  14520. +-static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
  14521. ++static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
  14522. + static int ath10k_pci_cold_reset(struct ath10k *ar);
  14523. + static int ath10k_pci_warm_reset(struct ath10k *ar);
  14524. + static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
  14525. +@@ -98,7 +104,7 @@ static const struct ce_attr host_ce_conf
  14526. + {
  14527. + .flags = CE_ATTR_FLAGS,
  14528. + .src_nentries = 0,
  14529. +- .src_sz_max = 512,
  14530. ++ .src_sz_max = 2048,
  14531. + .dest_nentries = 512,
  14532. + },
  14533. +
  14534. +@@ -155,79 +161,175 @@ static const struct ce_attr host_ce_conf
  14535. + static const struct ce_pipe_config target_ce_config_wlan[] = {
  14536. + /* CE0: host->target HTC control and raw streams */
  14537. + {
  14538. +- .pipenum = 0,
  14539. +- .pipedir = PIPEDIR_OUT,
  14540. +- .nentries = 32,
  14541. +- .nbytes_max = 256,
  14542. +- .flags = CE_ATTR_FLAGS,
  14543. +- .reserved = 0,
  14544. ++ .pipenum = __cpu_to_le32(0),
  14545. ++ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
  14546. ++ .nentries = __cpu_to_le32(32),
  14547. ++ .nbytes_max = __cpu_to_le32(256),
  14548. ++ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
  14549. ++ .reserved = __cpu_to_le32(0),
  14550. + },
  14551. +
  14552. + /* CE1: target->host HTT + HTC control */
  14553. + {
  14554. +- .pipenum = 1,
  14555. +- .pipedir = PIPEDIR_IN,
  14556. +- .nentries = 32,
  14557. +- .nbytes_max = 512,
  14558. +- .flags = CE_ATTR_FLAGS,
  14559. +- .reserved = 0,
  14560. ++ .pipenum = __cpu_to_le32(1),
  14561. ++ .pipedir = __cpu_to_le32(PIPEDIR_IN),
  14562. ++ .nentries = __cpu_to_le32(32),
  14563. ++ .nbytes_max = __cpu_to_le32(2048),
  14564. ++ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
  14565. ++ .reserved = __cpu_to_le32(0),
  14566. + },
  14567. +
  14568. + /* CE2: target->host WMI */
  14569. + {
  14570. +- .pipenum = 2,
  14571. +- .pipedir = PIPEDIR_IN,
  14572. +- .nentries = 32,
  14573. +- .nbytes_max = 2048,
  14574. +- .flags = CE_ATTR_FLAGS,
  14575. +- .reserved = 0,
  14576. ++ .pipenum = __cpu_to_le32(2),
  14577. ++ .pipedir = __cpu_to_le32(PIPEDIR_IN),
  14578. ++ .nentries = __cpu_to_le32(32),
  14579. ++ .nbytes_max = __cpu_to_le32(2048),
  14580. ++ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
  14581. ++ .reserved = __cpu_to_le32(0),
  14582. + },
  14583. +
  14584. + /* CE3: host->target WMI */
  14585. + {
  14586. +- .pipenum = 3,
  14587. +- .pipedir = PIPEDIR_OUT,
  14588. +- .nentries = 32,
  14589. +- .nbytes_max = 2048,
  14590. +- .flags = CE_ATTR_FLAGS,
  14591. +- .reserved = 0,
  14592. ++ .pipenum = __cpu_to_le32(3),
  14593. ++ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
  14594. ++ .nentries = __cpu_to_le32(32),
  14595. ++ .nbytes_max = __cpu_to_le32(2048),
  14596. ++ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
  14597. ++ .reserved = __cpu_to_le32(0),
  14598. + },
  14599. +
  14600. + /* CE4: host->target HTT */
  14601. + {
  14602. +- .pipenum = 4,
  14603. +- .pipedir = PIPEDIR_OUT,
  14604. +- .nentries = 256,
  14605. +- .nbytes_max = 256,
  14606. +- .flags = CE_ATTR_FLAGS,
  14607. +- .reserved = 0,
  14608. ++ .pipenum = __cpu_to_le32(4),
  14609. ++ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
  14610. ++ .nentries = __cpu_to_le32(256),
  14611. ++ .nbytes_max = __cpu_to_le32(256),
  14612. ++ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
  14613. ++ .reserved = __cpu_to_le32(0),
  14614. + },
  14615. +
  14616. + /* NB: 50% of src nentries, since tx has 2 frags */
  14617. +
  14618. + /* CE5: unused */
  14619. + {
  14620. +- .pipenum = 5,
  14621. +- .pipedir = PIPEDIR_OUT,
  14622. +- .nentries = 32,
  14623. +- .nbytes_max = 2048,
  14624. +- .flags = CE_ATTR_FLAGS,
  14625. +- .reserved = 0,
  14626. ++ .pipenum = __cpu_to_le32(5),
  14627. ++ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
  14628. ++ .nentries = __cpu_to_le32(32),
  14629. ++ .nbytes_max = __cpu_to_le32(2048),
  14630. ++ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
  14631. ++ .reserved = __cpu_to_le32(0),
  14632. + },
  14633. +
  14634. + /* CE6: Reserved for target autonomous hif_memcpy */
  14635. + {
  14636. +- .pipenum = 6,
  14637. +- .pipedir = PIPEDIR_INOUT,
  14638. +- .nentries = 32,
  14639. +- .nbytes_max = 4096,
  14640. +- .flags = CE_ATTR_FLAGS,
  14641. +- .reserved = 0,
  14642. ++ .pipenum = __cpu_to_le32(6),
  14643. ++ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
  14644. ++ .nentries = __cpu_to_le32(32),
  14645. ++ .nbytes_max = __cpu_to_le32(4096),
  14646. ++ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
  14647. ++ .reserved = __cpu_to_le32(0),
  14648. + },
  14649. +
  14650. + /* CE7 used only by Host */
  14651. + };
  14652. +
  14653. ++/*
  14654. ++ * Map from service/endpoint to Copy Engine.
  14655. ++ * This table is derived from the CE_PCI TABLE, above.
  14656. ++ * It is passed to the Target at startup for use by firmware.
  14657. ++ */
  14658. ++static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
  14659. ++ {
  14660. ++ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
  14661. ++ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
  14662. ++ __cpu_to_le32(3),
  14663. ++ },
  14664. ++ {
  14665. ++ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
  14666. ++ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
  14667. ++ __cpu_to_le32(2),
  14668. ++ },
  14669. ++ {
  14670. ++ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
  14671. ++ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
  14672. ++ __cpu_to_le32(3),
  14673. ++ },
  14674. ++ {
  14675. ++ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
  14676. ++ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
  14677. ++ __cpu_to_le32(2),
  14678. ++ },
  14679. ++ {
  14680. ++ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
  14681. ++ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
  14682. ++ __cpu_to_le32(3),
  14683. ++ },
  14684. ++ {
  14685. ++ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
  14686. ++ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
  14687. ++ __cpu_to_le32(2),
  14688. ++ },
  14689. ++ {
  14690. ++ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
  14691. ++ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
  14692. ++ __cpu_to_le32(3),
  14693. ++ },
  14694. ++ {
  14695. ++ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
  14696. ++ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
  14697. ++ __cpu_to_le32(2),
  14698. ++ },
  14699. ++ {
  14700. ++ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
  14701. ++ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
  14702. ++ __cpu_to_le32(3),
  14703. ++ },
  14704. ++ {
  14705. ++ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
  14706. ++ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
  14707. ++ __cpu_to_le32(2),
  14708. ++ },
  14709. ++ {
  14710. ++ __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
  14711. ++ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
  14712. ++ __cpu_to_le32(0),
  14713. ++ },
  14714. ++ {
  14715. ++ __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
  14716. ++ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
  14717. ++ __cpu_to_le32(1),
  14718. ++ },
  14719. ++ { /* not used */
  14720. ++ __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
  14721. ++ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
  14722. ++ __cpu_to_le32(0),
  14723. ++ },
  14724. ++ { /* not used */
  14725. ++ __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
  14726. ++ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
  14727. ++ __cpu_to_le32(1),
  14728. ++ },
  14729. ++ {
  14730. ++ __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
  14731. ++ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
  14732. ++ __cpu_to_le32(4),
  14733. ++ },
  14734. ++ {
  14735. ++ __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
  14736. ++ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
  14737. ++ __cpu_to_le32(1),
  14738. ++ },
  14739. ++
  14740. ++ /* (Additions here) */
  14741. ++
  14742. ++ { /* must be last */
  14743. ++ __cpu_to_le32(0),
  14744. ++ __cpu_to_le32(0),
  14745. ++ __cpu_to_le32(0),
  14746. ++ },
  14747. ++};
  14748. ++
  14749. + static bool ath10k_pci_irq_pending(struct ath10k *ar)
  14750. + {
  14751. + u32 cause;
  14752. +@@ -253,8 +355,8 @@ static void ath10k_pci_disable_and_clear
  14753. +
  14754. + /* IMPORTANT: this extra read transaction is required to
  14755. + * flush the posted write buffer. */
  14756. +- (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
  14757. +- PCIE_INTR_ENABLE_ADDRESS);
  14758. ++ (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
  14759. ++ PCIE_INTR_ENABLE_ADDRESS);
  14760. + }
  14761. +
  14762. + static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
  14763. +@@ -265,48 +367,116 @@ static void ath10k_pci_enable_legacy_irq
  14764. +
  14765. + /* IMPORTANT: this extra read transaction is required to
  14766. + * flush the posted write buffer. */
  14767. +- (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
  14768. +- PCIE_INTR_ENABLE_ADDRESS);
  14769. ++ (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
  14770. ++ PCIE_INTR_ENABLE_ADDRESS);
  14771. + }
  14772. +
  14773. +-static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg)
  14774. ++static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
  14775. + {
  14776. +- struct ath10k *ar = arg;
  14777. + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  14778. +
  14779. +- if (ar_pci->num_msi_intrs == 0) {
  14780. +- if (!ath10k_pci_irq_pending(ar))
  14781. +- return IRQ_NONE;
  14782. +-
  14783. +- ath10k_pci_disable_and_clear_legacy_irq(ar);
  14784. +- }
  14785. ++ if (ar_pci->num_msi_intrs > 1)
  14786. ++ return "msi-x";
  14787. +
  14788. +- tasklet_schedule(&ar_pci->early_irq_tasklet);
  14789. ++ if (ar_pci->num_msi_intrs == 1)
  14790. ++ return "msi";
  14791. +
  14792. +- return IRQ_HANDLED;
  14793. ++ return "legacy";
  14794. + }
  14795. +
  14796. +-static int ath10k_pci_request_early_irq(struct ath10k *ar)
  14797. ++static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
  14798. + {
  14799. ++ struct ath10k *ar = pipe->hif_ce_state;
  14800. + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  14801. ++ struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
  14802. ++ struct sk_buff *skb;
  14803. ++ dma_addr_t paddr;
  14804. + int ret;
  14805. +
  14806. +- /* Regardless whether MSI-X/MSI/legacy irqs have been set up the first
  14807. +- * interrupt from irq vector is triggered in all cases for FW
  14808. +- * indication/errors */
  14809. +- ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler,
  14810. +- IRQF_SHARED, "ath10k_pci (early)", ar);
  14811. ++ lockdep_assert_held(&ar_pci->ce_lock);
  14812. ++
  14813. ++ skb = dev_alloc_skb(pipe->buf_sz);
  14814. ++ if (!skb)
  14815. ++ return -ENOMEM;
  14816. ++
  14817. ++ WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
  14818. ++
  14819. ++ paddr = dma_map_single(ar->dev, skb->data,
  14820. ++ skb->len + skb_tailroom(skb),
  14821. ++ DMA_FROM_DEVICE);
  14822. ++ if (unlikely(dma_mapping_error(ar->dev, paddr))) {
  14823. ++ ath10k_warn(ar, "failed to dma map pci rx buf\n");
  14824. ++ dev_kfree_skb_any(skb);
  14825. ++ return -EIO;
  14826. ++ }
  14827. ++
  14828. ++ ATH10K_SKB_RXCB(skb)->paddr = paddr;
  14829. ++
  14830. ++ ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
  14831. + if (ret) {
  14832. +- ath10k_warn("failed to request early irq: %d\n", ret);
  14833. ++ ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
  14834. ++ dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
  14835. ++ DMA_FROM_DEVICE);
  14836. ++ dev_kfree_skb_any(skb);
  14837. + return ret;
  14838. + }
  14839. +
  14840. + return 0;
  14841. + }
  14842. +
  14843. +-static void ath10k_pci_free_early_irq(struct ath10k *ar)
  14844. ++static void __ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
  14845. ++{
  14846. ++ struct ath10k *ar = pipe->hif_ce_state;
  14847. ++ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  14848. ++ struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
  14849. ++ int ret, num;
  14850. ++
  14851. ++ lockdep_assert_held(&ar_pci->ce_lock);
  14852. ++
  14853. ++ if (pipe->buf_sz == 0)
  14854. ++ return;
  14855. ++
  14856. ++ if (!ce_pipe->dest_ring)
  14857. ++ return;
  14858. ++
  14859. ++ num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
  14860. ++ while (num--) {
  14861. ++ ret = __ath10k_pci_rx_post_buf(pipe);
  14862. ++ if (ret) {
  14863. ++ ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
  14864. ++ mod_timer(&ar_pci->rx_post_retry, jiffies +
  14865. ++ ATH10K_PCI_RX_POST_RETRY_MS);
  14866. ++ break;
  14867. ++ }
  14868. ++ }
  14869. ++}
  14870. ++
  14871. ++static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
  14872. ++{
  14873. ++ struct ath10k *ar = pipe->hif_ce_state;
  14874. ++ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  14875. ++
  14876. ++ spin_lock_bh(&ar_pci->ce_lock);
  14877. ++ __ath10k_pci_rx_post_pipe(pipe);
  14878. ++ spin_unlock_bh(&ar_pci->ce_lock);
  14879. ++}
  14880. ++
  14881. ++static void ath10k_pci_rx_post(struct ath10k *ar)
  14882. ++{
  14883. ++ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  14884. ++ int i;
  14885. ++
  14886. ++ spin_lock_bh(&ar_pci->ce_lock);
  14887. ++ for (i = 0; i < CE_COUNT; i++)
  14888. ++ __ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
  14889. ++ spin_unlock_bh(&ar_pci->ce_lock);
  14890. ++}
  14891. ++
  14892. ++static void ath10k_pci_rx_replenish_retry(unsigned long ptr)
  14893. + {
  14894. +- free_irq(ath10k_pci_priv(ar)->pdev->irq, ar);
  14895. ++ struct ath10k *ar = (void *)ptr;
  14896. ++
  14897. ++ ath10k_pci_rx_post(ar);
  14898. + }
  14899. +
  14900. + /*
  14901. +@@ -330,24 +500,7 @@ static int ath10k_pci_diag_read_mem(stru
  14902. + void *data_buf = NULL;
  14903. + int i;
  14904. +
  14905. +- /*
  14906. +- * This code cannot handle reads to non-memory space. Redirect to the
  14907. +- * register read fn but preserve the multi word read capability of
  14908. +- * this fn
  14909. +- */
  14910. +- if (address < DRAM_BASE_ADDRESS) {
  14911. +- if (!IS_ALIGNED(address, 4) ||
  14912. +- !IS_ALIGNED((unsigned long)data, 4))
  14913. +- return -EIO;
  14914. +-
  14915. +- while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
  14916. +- ar, address, (u32 *)data)) == 0)) {
  14917. +- nbytes -= sizeof(u32);
  14918. +- address += sizeof(u32);
  14919. +- data += sizeof(u32);
  14920. +- }
  14921. +- return ret;
  14922. +- }
  14923. ++ spin_lock_bh(&ar_pci->ce_lock);
  14924. +
  14925. + ce_diag = ar_pci->ce_diag;
  14926. +
  14927. +@@ -375,7 +528,7 @@ static int ath10k_pci_diag_read_mem(stru
  14928. + nbytes = min_t(unsigned int, remaining_bytes,
  14929. + DIAG_TRANSFER_LIMIT);
  14930. +
  14931. +- ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
  14932. ++ ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
  14933. + if (ret != 0)
  14934. + goto done;
  14935. +
  14936. +@@ -388,20 +541,18 @@ static int ath10k_pci_diag_read_mem(stru
  14937. + * convert it from Target CPU virtual address space
  14938. + * to CE address space
  14939. + */
  14940. +- ath10k_pci_wake(ar);
  14941. + address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
  14942. + address);
  14943. +- ath10k_pci_sleep(ar);
  14944. +
  14945. +- ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
  14946. +- 0);
  14947. ++ ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
  14948. ++ 0);
  14949. + if (ret)
  14950. + goto done;
  14951. +
  14952. + i = 0;
  14953. +- while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
  14954. +- &completed_nbytes,
  14955. +- &id) != 0) {
  14956. ++ while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
  14957. ++ &completed_nbytes,
  14958. ++ &id) != 0) {
  14959. + mdelay(1);
  14960. + if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
  14961. + ret = -EBUSY;
  14962. +@@ -414,15 +565,15 @@ static int ath10k_pci_diag_read_mem(stru
  14963. + goto done;
  14964. + }
  14965. +
  14966. +- if (buf != (u32) address) {
  14967. ++ if (buf != (u32)address) {
  14968. + ret = -EIO;
  14969. + goto done;
  14970. + }
  14971. +
  14972. + i = 0;
  14973. +- while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
  14974. +- &completed_nbytes,
  14975. +- &id, &flags) != 0) {
  14976. ++ while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
  14977. ++ &completed_nbytes,
  14978. ++ &id, &flags) != 0) {
  14979. + mdelay(1);
  14980. +
  14981. + if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
  14982. +@@ -447,38 +598,60 @@ static int ath10k_pci_diag_read_mem(stru
  14983. + }
  14984. +
  14985. + done:
  14986. +- if (ret == 0) {
  14987. +- /* Copy data from allocated DMA buf to caller's buf */
  14988. +- WARN_ON_ONCE(orig_nbytes & 3);
  14989. +- for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
  14990. +- ((u32 *)data)[i] =
  14991. +- __le32_to_cpu(((__le32 *)data_buf)[i]);
  14992. +- }
  14993. +- } else
  14994. +- ath10k_warn("failed to read diag value at 0x%x: %d\n",
  14995. ++ if (ret == 0)
  14996. ++ memcpy(data, data_buf, orig_nbytes);
  14997. ++ else
  14998. ++ ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
  14999. + address, ret);
  15000. +
  15001. + if (data_buf)
  15002. + dma_free_coherent(ar->dev, orig_nbytes, data_buf,
  15003. + ce_data_base);
  15004. +
  15005. ++ spin_unlock_bh(&ar_pci->ce_lock);
  15006. ++
  15007. + return ret;
  15008. + }
  15009. +
  15010. +-/* Read 4-byte aligned data from Target memory or register */
  15011. +-static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
  15012. +- u32 *data)
  15013. +-{
  15014. +- /* Assume range doesn't cross this boundary */
  15015. +- if (address >= DRAM_BASE_ADDRESS)
  15016. +- return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));
  15017. ++static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
  15018. ++{
  15019. ++ __le32 val = 0;
  15020. ++ int ret;
  15021. ++
  15022. ++ ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
  15023. ++ *value = __le32_to_cpu(val);
  15024. ++
  15025. ++ return ret;
  15026. ++}
  15027. ++
  15028. ++static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
  15029. ++ u32 src, u32 len)
  15030. ++{
  15031. ++ u32 host_addr, addr;
  15032. ++ int ret;
  15033. ++
  15034. ++ host_addr = host_interest_item_address(src);
  15035. ++
  15036. ++ ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
  15037. ++ if (ret != 0) {
  15038. ++ ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
  15039. ++ src, ret);
  15040. ++ return ret;
  15041. ++ }
  15042. ++
  15043. ++ ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
  15044. ++ if (ret != 0) {
  15045. ++ ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
  15046. ++ addr, len, ret);
  15047. ++ return ret;
  15048. ++ }
  15049. +
  15050. +- ath10k_pci_wake(ar);
  15051. +- *data = ath10k_pci_read32(ar, address);
  15052. +- ath10k_pci_sleep(ar);
  15053. + return 0;
  15054. + }
  15055. +
  15056. ++#define ath10k_pci_diag_read_hi(ar, dest, src, len) \
  15057. ++ __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
  15058. ++
  15059. + static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
  15060. + const void *data, int nbytes)
  15061. + {
  15062. +@@ -494,6 +667,8 @@ static int ath10k_pci_diag_write_mem(str
  15063. + dma_addr_t ce_data_base = 0;
  15064. + int i;
  15065. +
  15066. ++ spin_lock_bh(&ar_pci->ce_lock);
  15067. ++
  15068. + ce_diag = ar_pci->ce_diag;
  15069. +
  15070. + /*
  15071. +@@ -513,9 +688,7 @@ static int ath10k_pci_diag_write_mem(str
  15072. + }
  15073. +
  15074. + /* Copy caller's data to allocated DMA buf */
  15075. +- WARN_ON_ONCE(orig_nbytes & 3);
  15076. +- for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
  15077. +- ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);
  15078. ++ memcpy(data_buf, data, orig_nbytes);
  15079. +
  15080. + /*
  15081. + * The address supplied by the caller is in the
  15082. +@@ -527,9 +700,7 @@ static int ath10k_pci_diag_write_mem(str
  15083. + * to
  15084. + * CE address space
  15085. + */
  15086. +- ath10k_pci_wake(ar);
  15087. + address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
  15088. +- ath10k_pci_sleep(ar);
  15089. +
  15090. + remaining_bytes = orig_nbytes;
  15091. + ce_data = ce_data_base;
  15092. +@@ -538,7 +709,7 @@ static int ath10k_pci_diag_write_mem(str
  15093. + nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
  15094. +
  15095. + /* Set up to receive directly into Target(!) address */
  15096. +- ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
  15097. ++ ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address);
  15098. + if (ret != 0)
  15099. + goto done;
  15100. +
  15101. +@@ -546,15 +717,15 @@ static int ath10k_pci_diag_write_mem(str
  15102. + * Request CE to send caller-supplied data that
  15103. + * was copied to bounce buffer to Target(!) address.
  15104. + */
  15105. +- ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
  15106. +- nbytes, 0, 0);
  15107. ++ ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
  15108. ++ nbytes, 0, 0);
  15109. + if (ret != 0)
  15110. + goto done;
  15111. +
  15112. + i = 0;
  15113. +- while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
  15114. +- &completed_nbytes,
  15115. +- &id) != 0) {
  15116. ++ while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
  15117. ++ &completed_nbytes,
  15118. ++ &id) != 0) {
  15119. + mdelay(1);
  15120. +
  15121. + if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
  15122. +@@ -574,9 +745,9 @@ static int ath10k_pci_diag_write_mem(str
  15123. + }
  15124. +
  15125. + i = 0;
  15126. +- while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
  15127. +- &completed_nbytes,
  15128. +- &id, &flags) != 0) {
  15129. ++ while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
  15130. ++ &completed_nbytes,
  15131. ++ &id, &flags) != 0) {
  15132. + mdelay(1);
  15133. +
  15134. + if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
  15135. +@@ -607,66 +778,36 @@ done:
  15136. + }
  15137. +
  15138. + if (ret != 0)
  15139. +- ath10k_warn("failed to write diag value at 0x%x: %d\n",
  15140. ++ ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
  15141. + address, ret);
  15142. +
  15143. ++ spin_unlock_bh(&ar_pci->ce_lock);
  15144. ++
  15145. + return ret;
  15146. + }
  15147. +
  15148. +-/* Write 4B data to Target memory or register */
  15149. +-static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
  15150. +- u32 data)
  15151. +-{
  15152. +- /* Assume range doesn't cross this boundary */
  15153. +- if (address >= DRAM_BASE_ADDRESS)
  15154. +- return ath10k_pci_diag_write_mem(ar, address, &data,
  15155. +- sizeof(u32));
  15156. ++static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
  15157. ++{
  15158. ++ __le32 val = __cpu_to_le32(value);
  15159. +
  15160. +- ath10k_pci_wake(ar);
  15161. +- ath10k_pci_write32(ar, address, data);
  15162. +- ath10k_pci_sleep(ar);
  15163. +- return 0;
  15164. ++ return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
  15165. + }
  15166. +
  15167. +-static bool ath10k_pci_target_is_awake(struct ath10k *ar)
  15168. ++static bool ath10k_pci_is_awake(struct ath10k *ar)
  15169. + {
  15170. +- void __iomem *mem = ath10k_pci_priv(ar)->mem;
  15171. +- u32 val;
  15172. +- val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
  15173. +- RTC_STATE_ADDRESS);
  15174. +- return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
  15175. ++ u32 val = ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS);
  15176. ++
  15177. ++ return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
  15178. + }
  15179. +
  15180. +-int ath10k_do_pci_wake(struct ath10k *ar)
  15181. ++static int ath10k_pci_wake_wait(struct ath10k *ar)
  15182. + {
  15183. +- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  15184. +- void __iomem *pci_addr = ar_pci->mem;
  15185. + int tot_delay = 0;
  15186. + int curr_delay = 5;
  15187. +
  15188. +- if (atomic_read(&ar_pci->keep_awake_count) == 0) {
  15189. +- /* Force AWAKE */
  15190. +- iowrite32(PCIE_SOC_WAKE_V_MASK,
  15191. +- pci_addr + PCIE_LOCAL_BASE_ADDRESS +
  15192. +- PCIE_SOC_WAKE_ADDRESS);
  15193. +- }
  15194. +- atomic_inc(&ar_pci->keep_awake_count);
  15195. +-
  15196. +- if (ar_pci->verified_awake)
  15197. +- return 0;
  15198. +-
  15199. +- for (;;) {
  15200. +- if (ath10k_pci_target_is_awake(ar)) {
  15201. +- ar_pci->verified_awake = true;
  15202. ++ while (tot_delay < PCIE_WAKE_TIMEOUT) {
  15203. ++ if (ath10k_pci_is_awake(ar))
  15204. + return 0;
  15205. +- }
  15206. +-
  15207. +- if (tot_delay > PCIE_WAKE_TIMEOUT) {
  15208. +- ath10k_warn("target took longer %d us to wake up (awake count %d)\n",
  15209. +- PCIE_WAKE_TIMEOUT,
  15210. +- atomic_read(&ar_pci->keep_awake_count));
  15211. +- return -ETIMEDOUT;
  15212. +- }
  15213. +
  15214. + udelay(curr_delay);
  15215. + tot_delay += curr_delay;
  15216. +@@ -674,20 +815,21 @@ int ath10k_do_pci_wake(struct ath10k *ar
  15217. + if (curr_delay < 50)
  15218. + curr_delay += 5;
  15219. + }
  15220. ++
  15221. ++ return -ETIMEDOUT;
  15222. + }
  15223. +
  15224. +-void ath10k_do_pci_sleep(struct ath10k *ar)
  15225. ++static int ath10k_pci_wake(struct ath10k *ar)
  15226. + {
  15227. +- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  15228. +- void __iomem *pci_addr = ar_pci->mem;
  15229. ++ ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
  15230. ++ PCIE_SOC_WAKE_V_MASK);
  15231. ++ return ath10k_pci_wake_wait(ar);
  15232. ++}
  15233. +
  15234. +- if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
  15235. +- /* Allow sleep */
  15236. +- ar_pci->verified_awake = false;
  15237. +- iowrite32(PCIE_SOC_WAKE_RESET,
  15238. +- pci_addr + PCIE_LOCAL_BASE_ADDRESS +
  15239. +- PCIE_SOC_WAKE_ADDRESS);
  15240. +- }
  15241. ++static void ath10k_pci_sleep(struct ath10k *ar)
  15242. ++{
  15243. ++ ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
  15244. ++ PCIE_SOC_WAKE_RESET);
  15245. + }
  15246. +
  15247. + /* Called by lower (CE) layer when a send to Target completes. */
  15248. +@@ -696,20 +838,24 @@ static void ath10k_pci_ce_send_done(stru
  15249. + struct ath10k *ar = ce_state->ar;
  15250. + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  15251. + struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
  15252. +- void *transfer_context;
  15253. ++ struct sk_buff_head list;
  15254. ++ struct sk_buff *skb;
  15255. + u32 ce_data;
  15256. + unsigned int nbytes;
  15257. + unsigned int transfer_id;
  15258. +
  15259. +- while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
  15260. +- &ce_data, &nbytes,
  15261. +- &transfer_id) == 0) {
  15262. ++ __skb_queue_head_init(&list);
  15263. ++ while (ath10k_ce_completed_send_next(ce_state, (void **)&skb, &ce_data,
  15264. ++ &nbytes, &transfer_id) == 0) {
  15265. + /* no need to call tx completion for NULL pointers */
  15266. +- if (transfer_context == NULL)
  15267. ++ if (skb == NULL)
  15268. + continue;
  15269. +
  15270. +- cb->tx_completion(ar, transfer_context, transfer_id);
  15271. ++ __skb_queue_tail(&list, skb);
  15272. + }
  15273. ++
  15274. ++ while ((skb = __skb_dequeue(&list)))
  15275. ++ cb->tx_completion(ar, skb);
  15276. + }
  15277. +
  15278. + /* Called by lower (CE) layer when data is received from the Target. */
  15279. +@@ -720,38 +866,43 @@ static void ath10k_pci_ce_recv_data(stru
  15280. + struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
  15281. + struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
  15282. + struct sk_buff *skb;
  15283. ++ struct sk_buff_head list;
  15284. + void *transfer_context;
  15285. + u32 ce_data;
  15286. + unsigned int nbytes, max_nbytes;
  15287. + unsigned int transfer_id;
  15288. + unsigned int flags;
  15289. +- int err;
  15290. +
  15291. ++ __skb_queue_head_init(&list);
  15292. + while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
  15293. + &ce_data, &nbytes, &transfer_id,
  15294. + &flags) == 0) {
  15295. +- err = ath10k_pci_post_rx_pipe(pipe_info, 1);
  15296. +- if (unlikely(err)) {
  15297. +- /* FIXME: retry */
  15298. +- ath10k_warn("failed to replenish CE rx ring %d: %d\n",
  15299. +- pipe_info->pipe_num, err);
  15300. +- }
  15301. +-
  15302. + skb = transfer_context;
  15303. + max_nbytes = skb->len + skb_tailroom(skb);
  15304. +- dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
  15305. ++ dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
  15306. + max_nbytes, DMA_FROM_DEVICE);
  15307. +
  15308. + if (unlikely(max_nbytes < nbytes)) {
  15309. +- ath10k_warn("rxed more than expected (nbytes %d, max %d)",
  15310. ++ ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
  15311. + nbytes, max_nbytes);
  15312. + dev_kfree_skb_any(skb);
  15313. + continue;
  15314. + }
  15315. +
  15316. + skb_put(skb, nbytes);
  15317. +- cb->rx_completion(ar, skb, pipe_info->pipe_num);
  15318. ++ __skb_queue_tail(&list, skb);
  15319. ++ }
  15320. ++
  15321. ++ while ((skb = __skb_dequeue(&list))) {
  15322. ++ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
  15323. ++ ce_state->id, skb->len);
  15324. ++ ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
  15325. ++ skb->data, skb->len);
  15326. ++
  15327. ++ cb->rx_completion(ar, skb);
  15328. + }
  15329. ++
  15330. ++ ath10k_pci_rx_post_pipe(pipe_info);
  15331. + }
  15332. +
  15333. + static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
  15334. +@@ -761,24 +912,28 @@ static int ath10k_pci_hif_tx_sg(struct a
  15335. + struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
  15336. + struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
  15337. + struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
  15338. +- unsigned int nentries_mask = src_ring->nentries_mask;
  15339. +- unsigned int sw_index = src_ring->sw_index;
  15340. +- unsigned int write_index = src_ring->write_index;
  15341. +- int err, i;
  15342. ++ unsigned int nentries_mask;
  15343. ++ unsigned int sw_index;
  15344. ++ unsigned int write_index;
  15345. ++ int err, i = 0;
  15346. +
  15347. + spin_lock_bh(&ar_pci->ce_lock);
  15348. +
  15349. ++ nentries_mask = src_ring->nentries_mask;
  15350. ++ sw_index = src_ring->sw_index;
  15351. ++ write_index = src_ring->write_index;
  15352. ++
  15353. + if (unlikely(CE_RING_DELTA(nentries_mask,
  15354. + write_index, sw_index - 1) < n_items)) {
  15355. + err = -ENOBUFS;
  15356. +- goto unlock;
  15357. ++ goto err;
  15358. + }
  15359. +
  15360. + for (i = 0; i < n_items - 1; i++) {
  15361. +- ath10k_dbg(ATH10K_DBG_PCI,
  15362. ++ ath10k_dbg(ar, ATH10K_DBG_PCI,
  15363. + "pci tx item %d paddr 0x%08x len %d n_items %d\n",
  15364. + i, items[i].paddr, items[i].len, n_items);
  15365. +- ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
  15366. ++ ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
  15367. + items[i].vaddr, items[i].len);
  15368. +
  15369. + err = ath10k_ce_send_nolock(ce_pipe,
  15370. +@@ -788,15 +943,15 @@ static int ath10k_pci_hif_tx_sg(struct a
  15371. + items[i].transfer_id,
  15372. + CE_SEND_FLAG_GATHER);
  15373. + if (err)
  15374. +- goto unlock;
  15375. ++ goto err;
  15376. + }
  15377. +
  15378. + /* `i` is equal to `n_items -1` after for() */
  15379. +
  15380. +- ath10k_dbg(ATH10K_DBG_PCI,
  15381. ++ ath10k_dbg(ar, ATH10K_DBG_PCI,
  15382. + "pci tx item %d paddr 0x%08x len %d n_items %d\n",
  15383. + i, items[i].paddr, items[i].len, n_items);
  15384. +- ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
  15385. ++ ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
  15386. + items[i].vaddr, items[i].len);
  15387. +
  15388. + err = ath10k_ce_send_nolock(ce_pipe,
  15389. +@@ -806,64 +961,89 @@ static int ath10k_pci_hif_tx_sg(struct a
  15390. + items[i].transfer_id,
  15391. + 0);
  15392. + if (err)
  15393. +- goto unlock;
  15394. ++ goto err;
  15395. ++
  15396. ++ spin_unlock_bh(&ar_pci->ce_lock);
  15397. ++ return 0;
  15398. ++
  15399. ++err:
  15400. ++ for (; i > 0; i--)
  15401. ++ __ath10k_ce_send_revert(ce_pipe);
  15402. +
  15403. +- err = 0;
  15404. +-unlock:
  15405. + spin_unlock_bh(&ar_pci->ce_lock);
  15406. + return err;
  15407. + }
  15408. +
  15409. ++static int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
  15410. ++ size_t buf_len)
  15411. ++{
  15412. ++ return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
  15413. ++}
  15414. ++
  15415. + static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
  15416. + {
  15417. + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  15418. +
  15419. +- ath10k_dbg(ATH10K_DBG_PCI, "pci hif get free queue number\n");
  15420. ++ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");
  15421. +
  15422. + return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
  15423. + }
  15424. +
  15425. +-static void ath10k_pci_hif_dump_area(struct ath10k *ar)
  15426. ++static void ath10k_pci_dump_registers(struct ath10k *ar,
  15427. ++ struct ath10k_fw_crash_data *crash_data)
  15428. + {
  15429. +- u32 reg_dump_area = 0;
  15430. +- u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
  15431. +- u32 host_addr;
  15432. +- int ret;
  15433. +- u32 i;
  15434. +-
  15435. +- ath10k_err("firmware crashed!\n");
  15436. +- ath10k_err("hardware name %s version 0x%x\n",
  15437. +- ar->hw_params.name, ar->target_version);
  15438. +- ath10k_err("firmware version: %s\n", ar->hw->wiphy->fw_version);
  15439. +-
  15440. +- host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
  15441. +- ret = ath10k_pci_diag_read_mem(ar, host_addr,
  15442. +- &reg_dump_area, sizeof(u32));
  15443. +- if (ret) {
  15444. +- ath10k_err("failed to read FW dump area address: %d\n", ret);
  15445. +- return;
  15446. +- }
  15447. ++ __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
  15448. ++ int i, ret;
  15449. +
  15450. +- ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);
  15451. ++ lockdep_assert_held(&ar->data_lock);
  15452. +
  15453. +- ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
  15454. +- &reg_dump_values[0],
  15455. +- REG_DUMP_COUNT_QCA988X * sizeof(u32));
  15456. +- if (ret != 0) {
  15457. +- ath10k_err("failed to read FW dump area: %d\n", ret);
  15458. ++ ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
  15459. ++ hi_failure_state,
  15460. ++ REG_DUMP_COUNT_QCA988X * sizeof(__le32));
  15461. ++ if (ret) {
  15462. ++ ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
  15463. + return;
  15464. + }
  15465. +
  15466. + BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
  15467. +
  15468. +- ath10k_err("target Register Dump\n");
  15469. ++ ath10k_err(ar, "firmware register dump:\n");
  15470. + for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
  15471. +- ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
  15472. ++ ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
  15473. + i,
  15474. +- reg_dump_values[i],
  15475. +- reg_dump_values[i + 1],
  15476. +- reg_dump_values[i + 2],
  15477. +- reg_dump_values[i + 3]);
  15478. ++ __le32_to_cpu(reg_dump_values[i]),
  15479. ++ __le32_to_cpu(reg_dump_values[i + 1]),
  15480. ++ __le32_to_cpu(reg_dump_values[i + 2]),
  15481. ++ __le32_to_cpu(reg_dump_values[i + 3]));
  15482. ++
  15483. ++ if (!crash_data)
  15484. ++ return;
  15485. ++
  15486. ++ for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
  15487. ++ crash_data->registers[i] = reg_dump_values[i];
  15488. ++}
  15489. ++
  15490. ++static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
  15491. ++{
  15492. ++ struct ath10k_fw_crash_data *crash_data;
  15493. ++ char uuid[50];
  15494. ++
  15495. ++ spin_lock_bh(&ar->data_lock);
  15496. ++
  15497. ++ ar->stats.fw_crash_counter++;
  15498. ++
  15499. ++ crash_data = ath10k_debug_get_new_fw_crash_data(ar);
  15500. ++
  15501. ++ if (crash_data)
  15502. ++ scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid);
  15503. ++ else
  15504. ++ scnprintf(uuid, sizeof(uuid), "n/a");
  15505. ++
  15506. ++ ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid);
  15507. ++ ath10k_print_driver_info(ar);
  15508. ++ ath10k_pci_dump_registers(ar, crash_data);
  15509. ++
  15510. ++ spin_unlock_bh(&ar->data_lock);
  15511. +
  15512. + queue_work(ar->workqueue, &ar->restart_work);
  15513. + }
  15514. +@@ -871,7 +1051,7 @@ static void ath10k_pci_hif_dump_area(str
  15515. + static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
  15516. + int force)
  15517. + {
  15518. +- ath10k_dbg(ATH10K_DBG_PCI, "pci hif send complete check\n");
  15519. ++ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");
  15520. +
  15521. + if (!force) {
  15522. + int resources;
  15523. +@@ -899,43 +1079,12 @@ static void ath10k_pci_hif_set_callbacks
  15524. + {
  15525. + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  15526. +
  15527. +- ath10k_dbg(ATH10K_DBG_PCI, "pci hif set callbacks\n");
  15528. ++ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif set callbacks\n");
  15529. +
  15530. + memcpy(&ar_pci->msg_callbacks_current, callbacks,
  15531. + sizeof(ar_pci->msg_callbacks_current));
  15532. + }
  15533. +
  15534. +-static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
  15535. +-{
  15536. +- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  15537. +- const struct ce_attr *attr;
  15538. +- struct ath10k_pci_pipe *pipe_info;
  15539. +- int pipe_num, disable_interrupts;
  15540. +-
  15541. +- for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
  15542. +- pipe_info = &ar_pci->pipe_info[pipe_num];
  15543. +-
  15544. +- /* Handle Diagnostic CE specially */
  15545. +- if (pipe_info->ce_hdl == ar_pci->ce_diag)
  15546. +- continue;
  15547. +-
  15548. +- attr = &host_ce_config_wlan[pipe_num];
  15549. +-
  15550. +- if (attr->src_nentries) {
  15551. +- disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
  15552. +- ath10k_ce_send_cb_register(pipe_info->ce_hdl,
  15553. +- ath10k_pci_ce_send_done,
  15554. +- disable_interrupts);
  15555. +- }
  15556. +-
  15557. +- if (attr->dest_nentries)
  15558. +- ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
  15559. +- ath10k_pci_ce_recv_data);
  15560. +- }
  15561. +-
  15562. +- return 0;
  15563. +-}
  15564. +-
  15565. + static void ath10k_pci_kill_tasklet(struct ath10k *ar)
  15566. + {
  15567. + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  15568. +@@ -943,82 +1092,72 @@ static void ath10k_pci_kill_tasklet(stru
  15569. +
  15570. + tasklet_kill(&ar_pci->intr_tq);
  15571. + tasklet_kill(&ar_pci->msi_fw_err);
  15572. +- tasklet_kill(&ar_pci->early_irq_tasklet);
  15573. +
  15574. + for (i = 0; i < CE_COUNT; i++)
  15575. + tasklet_kill(&ar_pci->pipe_info[i].intr);
  15576. ++
  15577. ++ del_timer_sync(&ar_pci->rx_post_retry);
  15578. + }
  15579. +
  15580. +-/* TODO - temporary mapping while we have too few CE's */
  15581. + static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
  15582. + u16 service_id, u8 *ul_pipe,
  15583. + u8 *dl_pipe, int *ul_is_polled,
  15584. + int *dl_is_polled)
  15585. + {
  15586. +- int ret = 0;
  15587. ++ const struct service_to_pipe *entry;
  15588. ++ bool ul_set = false, dl_set = false;
  15589. ++ int i;
  15590. +
  15591. +- ath10k_dbg(ATH10K_DBG_PCI, "pci hif map service\n");
  15592. ++ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");
  15593. +
  15594. + /* polling for received messages not supported */
  15595. + *dl_is_polled = 0;
  15596. +
  15597. +- switch (service_id) {
  15598. +- case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
  15599. +- /*
  15600. +- * Host->target HTT gets its own pipe, so it can be polled
  15601. +- * while other pipes are interrupt driven.
  15602. +- */
  15603. +- *ul_pipe = 4;
  15604. +- /*
  15605. +- * Use the same target->host pipe for HTC ctrl, HTC raw
  15606. +- * streams, and HTT.
  15607. +- */
  15608. +- *dl_pipe = 1;
  15609. +- break;
  15610. ++ for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
  15611. ++ entry = &target_service_to_ce_map_wlan[i];
  15612. +
  15613. +- case ATH10K_HTC_SVC_ID_RSVD_CTRL:
  15614. +- case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
  15615. +- /*
  15616. +- * Note: HTC_RAW_STREAMS_SVC is currently unused, and
  15617. +- * HTC_CTRL_RSVD_SVC could share the same pipe as the
  15618. +- * WMI services. So, if another CE is needed, change
  15619. +- * this to *ul_pipe = 3, which frees up CE 0.
  15620. +- */
  15621. +- /* *ul_pipe = 3; */
  15622. +- *ul_pipe = 0;
  15623. +- *dl_pipe = 1;
  15624. +- break;
  15625. ++ if (__le32_to_cpu(entry->service_id) != service_id)
  15626. ++ continue;
  15627. +
  15628. +- case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
  15629. +- case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
  15630. +- case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
  15631. +- case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
  15632. +-
  15633. +- case ATH10K_HTC_SVC_ID_WMI_CONTROL:
  15634. +- *ul_pipe = 3;
  15635. +- *dl_pipe = 2;
  15636. +- break;
  15637. ++ switch (__le32_to_cpu(entry->pipedir)) {
  15638. ++ case PIPEDIR_NONE:
  15639. ++ break;
  15640. ++ case PIPEDIR_IN:
  15641. ++ WARN_ON(dl_set);
  15642. ++ *dl_pipe = __le32_to_cpu(entry->pipenum);
  15643. ++ dl_set = true;
  15644. ++ break;
  15645. ++ case PIPEDIR_OUT:
  15646. ++ WARN_ON(ul_set);
  15647. ++ *ul_pipe = __le32_to_cpu(entry->pipenum);
  15648. ++ ul_set = true;
  15649. ++ break;
  15650. ++ case PIPEDIR_INOUT:
  15651. ++ WARN_ON(dl_set);
  15652. ++ WARN_ON(ul_set);
  15653. ++ *dl_pipe = __le32_to_cpu(entry->pipenum);
  15654. ++ *ul_pipe = __le32_to_cpu(entry->pipenum);
  15655. ++ dl_set = true;
  15656. ++ ul_set = true;
  15657. ++ break;
  15658. ++ }
  15659. ++ }
  15660. +
  15661. +- /* pipe 5 unused */
  15662. +- /* pipe 6 reserved */
  15663. +- /* pipe 7 reserved */
  15664. ++ if (WARN_ON(!ul_set || !dl_set))
  15665. ++ return -ENOENT;
  15666. +
  15667. +- default:
  15668. +- ret = -1;
  15669. +- break;
  15670. +- }
  15671. + *ul_is_polled =
  15672. + (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;
  15673. +
  15674. +- return ret;
  15675. ++ return 0;
  15676. + }
  15677. +
  15678. + static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
  15679. +- u8 *ul_pipe, u8 *dl_pipe)
  15680. ++ u8 *ul_pipe, u8 *dl_pipe)
  15681. + {
  15682. + int ul_is_polled, dl_is_polled;
  15683. +
  15684. +- ath10k_dbg(ATH10K_DBG_PCI, "pci hif get default pipe\n");
  15685. ++ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");
  15686. +
  15687. + (void)ath10k_pci_hif_map_service_to_pipe(ar,
  15688. + ATH10K_HTC_SVC_ID_RSVD_CTRL,
  15689. +@@ -1028,209 +1167,127 @@ static void ath10k_pci_hif_get_default_p
  15690. + &dl_is_polled);
  15691. + }
  15692. +
  15693. +-static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
  15694. +- int num)
  15695. ++static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
  15696. + {
  15697. +- struct ath10k *ar = pipe_info->hif_ce_state;
  15698. +- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  15699. +- struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
  15700. +- struct sk_buff *skb;
  15701. +- dma_addr_t ce_data;
  15702. +- int i, ret = 0;
  15703. ++ u32 val;
  15704. +
  15705. +- if (pipe_info->buf_sz == 0)
  15706. +- return 0;
  15707. ++ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
  15708. ++ val &= ~CORE_CTRL_PCIE_REG_31_MASK;
  15709. +
  15710. +- for (i = 0; i < num; i++) {
  15711. +- skb = dev_alloc_skb(pipe_info->buf_sz);
  15712. +- if (!skb) {
  15713. +- ath10k_warn("failed to allocate skbuff for pipe %d\n",
  15714. +- num);
  15715. +- ret = -ENOMEM;
  15716. +- goto err;
  15717. +- }
  15718. ++ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
  15719. ++}
  15720. +
  15721. +- WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
  15722. ++static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
  15723. ++{
  15724. ++ u32 val;
  15725. +
  15726. +- ce_data = dma_map_single(ar->dev, skb->data,
  15727. +- skb->len + skb_tailroom(skb),
  15728. +- DMA_FROM_DEVICE);
  15729. ++ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
  15730. ++ val |= CORE_CTRL_PCIE_REG_31_MASK;
  15731. +
  15732. +- if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
  15733. +- ath10k_warn("failed to DMA map sk_buff\n");
  15734. +- dev_kfree_skb_any(skb);
  15735. +- ret = -EIO;
  15736. +- goto err;
  15737. +- }
  15738. ++ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
  15739. ++}
  15740. +
  15741. +- ATH10K_SKB_CB(skb)->paddr = ce_data;
  15742. ++static void ath10k_pci_irq_disable(struct ath10k *ar)
  15743. ++{
  15744. ++ ath10k_ce_disable_interrupts(ar);
  15745. ++ ath10k_pci_disable_and_clear_legacy_irq(ar);
  15746. ++ ath10k_pci_irq_msi_fw_mask(ar);
  15747. ++}
  15748. +
  15749. +- pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
  15750. +- pipe_info->buf_sz,
  15751. +- PCI_DMA_FROMDEVICE);
  15752. +-
  15753. +- ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
  15754. +- ce_data);
  15755. +- if (ret) {
  15756. +- ath10k_warn("failed to enqueue to pipe %d: %d\n",
  15757. +- num, ret);
  15758. +- goto err;
  15759. +- }
  15760. +- }
  15761. +-
  15762. +- return ret;
  15763. +-
  15764. +-err:
  15765. +- ath10k_pci_rx_pipe_cleanup(pipe_info);
  15766. +- return ret;
  15767. +-}
  15768. +-
  15769. +-static int ath10k_pci_post_rx(struct ath10k *ar)
  15770. ++static void ath10k_pci_irq_sync(struct ath10k *ar)
  15771. + {
  15772. + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  15773. +- struct ath10k_pci_pipe *pipe_info;
  15774. +- const struct ce_attr *attr;
  15775. +- int pipe_num, ret = 0;
  15776. +-
  15777. +- for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
  15778. +- pipe_info = &ar_pci->pipe_info[pipe_num];
  15779. +- attr = &host_ce_config_wlan[pipe_num];
  15780. +-
  15781. +- if (attr->dest_nentries == 0)
  15782. +- continue;
  15783. +-
  15784. +- ret = ath10k_pci_post_rx_pipe(pipe_info,
  15785. +- attr->dest_nentries - 1);
  15786. +- if (ret) {
  15787. +- ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
  15788. +- pipe_num, ret);
  15789. ++ int i;
  15790. +
  15791. +- for (; pipe_num >= 0; pipe_num--) {
  15792. +- pipe_info = &ar_pci->pipe_info[pipe_num];
  15793. +- ath10k_pci_rx_pipe_cleanup(pipe_info);
  15794. +- }
  15795. +- return ret;
  15796. +- }
  15797. +- }
  15798. ++ for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
  15799. ++ synchronize_irq(ar_pci->pdev->irq + i);
  15800. ++}
  15801. +
  15802. +- return 0;
  15803. ++static void ath10k_pci_irq_enable(struct ath10k *ar)
  15804. ++{
  15805. ++ ath10k_ce_enable_interrupts(ar);
  15806. ++ ath10k_pci_enable_legacy_irq(ar);
  15807. ++ ath10k_pci_irq_msi_fw_unmask(ar);
  15808. + }
  15809. +
  15810. + static int ath10k_pci_hif_start(struct ath10k *ar)
  15811. + {
  15812. +- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  15813. +- int ret, ret_early;
  15814. +-
  15815. +- ath10k_dbg(ATH10K_DBG_BOOT, "boot hif start\n");
  15816. +-
  15817. +- ath10k_pci_free_early_irq(ar);
  15818. +- ath10k_pci_kill_tasklet(ar);
  15819. +-
  15820. +- ret = ath10k_pci_request_irq(ar);
  15821. +- if (ret) {
  15822. +- ath10k_warn("failed to post RX buffers for all pipes: %d\n",
  15823. +- ret);
  15824. +- goto err_early_irq;
  15825. +- }
  15826. +-
  15827. +- ret = ath10k_pci_setup_ce_irq(ar);
  15828. +- if (ret) {
  15829. +- ath10k_warn("failed to setup CE interrupts: %d\n", ret);
  15830. +- goto err_stop;
  15831. +- }
  15832. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
  15833. +
  15834. +- /* Post buffers once to start things off. */
  15835. +- ret = ath10k_pci_post_rx(ar);
  15836. +- if (ret) {
  15837. +- ath10k_warn("failed to post RX buffers for all pipes: %d\n",
  15838. +- ret);
  15839. +- goto err_stop;
  15840. +- }
  15841. ++ ath10k_pci_irq_enable(ar);
  15842. ++ ath10k_pci_rx_post(ar);
  15843. +
  15844. +- ar_pci->started = 1;
  15845. + return 0;
  15846. +-
  15847. +-err_stop:
  15848. +- ath10k_ce_disable_interrupts(ar);
  15849. +- ath10k_pci_free_irq(ar);
  15850. +- ath10k_pci_kill_tasklet(ar);
  15851. +-err_early_irq:
  15852. +- /* Though there should be no interrupts (device was reset)
  15853. +- * power_down() expects the early IRQ to be installed as per the
  15854. +- * driver lifecycle. */
  15855. +- ret_early = ath10k_pci_request_early_irq(ar);
  15856. +- if (ret_early)
  15857. +- ath10k_warn("failed to re-enable early irq: %d\n", ret_early);
  15858. +-
  15859. +- return ret;
  15860. + }
  15861. +
  15862. +-static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
  15863. ++static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
  15864. + {
  15865. + struct ath10k *ar;
  15866. +- struct ath10k_pci *ar_pci;
  15867. +- struct ath10k_ce_pipe *ce_hdl;
  15868. +- u32 buf_sz;
  15869. +- struct sk_buff *netbuf;
  15870. +- u32 ce_data;
  15871. ++ struct ath10k_ce_pipe *ce_pipe;
  15872. ++ struct ath10k_ce_ring *ce_ring;
  15873. ++ struct sk_buff *skb;
  15874. ++ int i;
  15875. +
  15876. +- buf_sz = pipe_info->buf_sz;
  15877. ++ ar = pci_pipe->hif_ce_state;
  15878. ++ ce_pipe = pci_pipe->ce_hdl;
  15879. ++ ce_ring = ce_pipe->dest_ring;
  15880. +
  15881. +- /* Unused Copy Engine */
  15882. +- if (buf_sz == 0)
  15883. ++ if (!ce_ring)
  15884. + return;
  15885. +
  15886. +- ar = pipe_info->hif_ce_state;
  15887. +- ar_pci = ath10k_pci_priv(ar);
  15888. +-
  15889. +- if (!ar_pci->started)
  15890. ++ if (!pci_pipe->buf_sz)
  15891. + return;
  15892. +
  15893. +- ce_hdl = pipe_info->ce_hdl;
  15894. ++ for (i = 0; i < ce_ring->nentries; i++) {
  15895. ++ skb = ce_ring->per_transfer_context[i];
  15896. ++ if (!skb)
  15897. ++ continue;
  15898. ++
  15899. ++ ce_ring->per_transfer_context[i] = NULL;
  15900. +
  15901. +- while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
  15902. +- &ce_data) == 0) {
  15903. +- dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
  15904. +- netbuf->len + skb_tailroom(netbuf),
  15905. ++ dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
  15906. ++ skb->len + skb_tailroom(skb),
  15907. + DMA_FROM_DEVICE);
  15908. +- dev_kfree_skb_any(netbuf);
  15909. ++ dev_kfree_skb_any(skb);
  15910. + }
  15911. + }
  15912. +
  15913. +-static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
  15914. ++static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
  15915. + {
  15916. + struct ath10k *ar;
  15917. + struct ath10k_pci *ar_pci;
  15918. +- struct ath10k_ce_pipe *ce_hdl;
  15919. +- struct sk_buff *netbuf;
  15920. +- u32 ce_data;
  15921. +- unsigned int nbytes;
  15922. ++ struct ath10k_ce_pipe *ce_pipe;
  15923. ++ struct ath10k_ce_ring *ce_ring;
  15924. ++ struct ce_desc *ce_desc;
  15925. ++ struct sk_buff *skb;
  15926. + unsigned int id;
  15927. +- u32 buf_sz;
  15928. ++ int i;
  15929. +
  15930. +- buf_sz = pipe_info->buf_sz;
  15931. ++ ar = pci_pipe->hif_ce_state;
  15932. ++ ar_pci = ath10k_pci_priv(ar);
  15933. ++ ce_pipe = pci_pipe->ce_hdl;
  15934. ++ ce_ring = ce_pipe->src_ring;
  15935. +
  15936. +- /* Unused Copy Engine */
  15937. +- if (buf_sz == 0)
  15938. ++ if (!ce_ring)
  15939. + return;
  15940. +
  15941. +- ar = pipe_info->hif_ce_state;
  15942. +- ar_pci = ath10k_pci_priv(ar);
  15943. +-
  15944. +- if (!ar_pci->started)
  15945. ++ if (!pci_pipe->buf_sz)
  15946. + return;
  15947. +
  15948. +- ce_hdl = pipe_info->ce_hdl;
  15949. ++ ce_desc = ce_ring->shadow_base;
  15950. ++ if (WARN_ON(!ce_desc))
  15951. ++ return;
  15952. +
  15953. +- while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
  15954. +- &ce_data, &nbytes, &id) == 0) {
  15955. +- /* no need to call tx completion for NULL pointers */
  15956. +- if (!netbuf)
  15957. ++ for (i = 0; i < ce_ring->nentries; i++) {
  15958. ++ skb = ce_ring->per_transfer_context[i];
  15959. ++ if (!skb)
  15960. + continue;
  15961. +
  15962. +- ar_pci->msg_callbacks_current.tx_completion(ar,
  15963. +- netbuf,
  15964. +- id);
  15965. ++ ce_ring->per_transfer_context[i] = NULL;
  15966. ++ id = MS(__le16_to_cpu(ce_desc[i].flags),
  15967. ++ CE_DESC_FLAGS_META_DATA);
  15968. ++
  15969. ++ ar_pci->msg_callbacks_current.tx_completion(ar, skb);
  15970. + }
  15971. + }
  15972. +
  15973. +@@ -1264,38 +1321,32 @@ static void ath10k_pci_ce_deinit(struct
  15974. + ath10k_ce_deinit_pipe(ar, i);
  15975. + }
  15976. +
  15977. +-static void ath10k_pci_hif_stop(struct ath10k *ar)
  15978. ++static void ath10k_pci_flush(struct ath10k *ar)
  15979. + {
  15980. +- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  15981. +- int ret;
  15982. +-
  15983. +- ath10k_dbg(ATH10K_DBG_BOOT, "boot hif stop\n");
  15984. +-
  15985. +- ret = ath10k_ce_disable_interrupts(ar);
  15986. +- if (ret)
  15987. +- ath10k_warn("failed to disable CE interrupts: %d\n", ret);
  15988. +-
  15989. +- ath10k_pci_free_irq(ar);
  15990. + ath10k_pci_kill_tasklet(ar);
  15991. +-
  15992. +- ret = ath10k_pci_request_early_irq(ar);
  15993. +- if (ret)
  15994. +- ath10k_warn("failed to re-enable early irq: %d\n", ret);
  15995. +-
  15996. +- /* At this point, asynchronous threads are stopped, the target should
  15997. +- * not DMA nor interrupt. We process the leftovers and then free
  15998. +- * everything else up. */
  15999. +-
  16000. + ath10k_pci_buffer_cleanup(ar);
  16001. ++}
  16002. +
  16003. +- /* Make the sure the device won't access any structures on the host by
  16004. +- * resetting it. The device was fed with PCI CE ringbuffer
  16005. +- * configuration during init. If ringbuffers are freed and the device
  16006. +- * were to access them this could lead to memory corruption on the
  16007. +- * host. */
  16008. ++static void ath10k_pci_hif_stop(struct ath10k *ar)
  16009. ++{
  16010. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
  16011. ++
  16012. ++ /* Most likely the device has HTT Rx ring configured. The only way to
  16013. ++ * prevent the device from accessing (and possible corrupting) host
  16014. ++ * memory is to reset the chip now.
  16015. ++ *
  16016. ++ * There's also no known way of masking MSI interrupts on the device.
  16017. ++ * For ranged MSI the CE-related interrupts can be masked. However
  16018. ++ * regardless how many MSI interrupts are assigned the first one
  16019. ++ * is always used for firmware indications (crashes) and cannot be
  16020. ++ * masked. To prevent the device from asserting the interrupt reset it
  16021. ++ * before proceeding with cleanup.
  16022. ++ */
  16023. + ath10k_pci_warm_reset(ar);
  16024. +
  16025. +- ar_pci->started = 0;
  16026. ++ ath10k_pci_irq_disable(ar);
  16027. ++ ath10k_pci_irq_sync(ar);
  16028. ++ ath10k_pci_flush(ar);
  16029. + }
  16030. +
  16031. + static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
  16032. +@@ -1346,11 +1397,9 @@ static int ath10k_pci_hif_exchange_bmi_m
  16033. + xfer.wait_for_resp = true;
  16034. + xfer.resp_len = 0;
  16035. +
  16036. +- ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
  16037. ++ ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
  16038. + }
  16039. +
  16040. +- init_completion(&xfer.done);
  16041. +-
  16042. + ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
  16043. + if (ret)
  16044. + goto err_resp;
  16045. +@@ -1401,14 +1450,12 @@ static void ath10k_pci_bmi_send_done(str
  16046. + &nbytes, &transfer_id))
  16047. + return;
  16048. +
  16049. +- if (xfer->wait_for_resp)
  16050. +- return;
  16051. +-
  16052. +- complete(&xfer->done);
  16053. ++ xfer->tx_done = true;
  16054. + }
  16055. +
  16056. + static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
  16057. + {
  16058. ++ struct ath10k *ar = ce_state->ar;
  16059. + struct bmi_xfer *xfer;
  16060. + u32 ce_data;
  16061. + unsigned int nbytes;
  16062. +@@ -1419,13 +1466,16 @@ static void ath10k_pci_bmi_recv_data(str
  16063. + &nbytes, &transfer_id, &flags))
  16064. + return;
  16065. +
  16066. ++ if (WARN_ON_ONCE(!xfer))
  16067. ++ return;
  16068. ++
  16069. + if (!xfer->wait_for_resp) {
  16070. +- ath10k_warn("unexpected: BMI data received; ignoring\n");
  16071. ++ ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
  16072. + return;
  16073. + }
  16074. +
  16075. + xfer->resp_len = nbytes;
  16076. +- complete(&xfer->done);
  16077. ++ xfer->rx_done = true;
  16078. + }
  16079. +
  16080. + static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
  16081. +@@ -1438,7 +1488,7 @@ static int ath10k_pci_bmi_wait(struct at
  16082. + ath10k_pci_bmi_send_done(tx_pipe);
  16083. + ath10k_pci_bmi_recv_data(rx_pipe);
  16084. +
  16085. +- if (completion_done(&xfer->done))
  16086. ++ if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp))
  16087. + return 0;
  16088. +
  16089. + schedule();
  16090. +@@ -1448,131 +1498,48 @@ static int ath10k_pci_bmi_wait(struct at
  16091. + }
  16092. +
  16093. + /*
  16094. +- * Map from service/endpoint to Copy Engine.
  16095. +- * This table is derived from the CE_PCI TABLE, above.
  16096. +- * It is passed to the Target at startup for use by firmware.
  16097. +- */
  16098. +-static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
  16099. +- {
  16100. +- ATH10K_HTC_SVC_ID_WMI_DATA_VO,
  16101. +- PIPEDIR_OUT, /* out = UL = host -> target */
  16102. +- 3,
  16103. +- },
  16104. +- {
  16105. +- ATH10K_HTC_SVC_ID_WMI_DATA_VO,
  16106. +- PIPEDIR_IN, /* in = DL = target -> host */
  16107. +- 2,
  16108. +- },
  16109. +- {
  16110. +- ATH10K_HTC_SVC_ID_WMI_DATA_BK,
  16111. +- PIPEDIR_OUT, /* out = UL = host -> target */
  16112. +- 3,
  16113. +- },
  16114. +- {
  16115. +- ATH10K_HTC_SVC_ID_WMI_DATA_BK,
  16116. +- PIPEDIR_IN, /* in = DL = target -> host */
  16117. +- 2,
  16118. +- },
  16119. +- {
  16120. +- ATH10K_HTC_SVC_ID_WMI_DATA_BE,
  16121. +- PIPEDIR_OUT, /* out = UL = host -> target */
  16122. +- 3,
  16123. +- },
  16124. +- {
  16125. +- ATH10K_HTC_SVC_ID_WMI_DATA_BE,
  16126. +- PIPEDIR_IN, /* in = DL = target -> host */
  16127. +- 2,
  16128. +- },
  16129. +- {
  16130. +- ATH10K_HTC_SVC_ID_WMI_DATA_VI,
  16131. +- PIPEDIR_OUT, /* out = UL = host -> target */
  16132. +- 3,
  16133. +- },
  16134. +- {
  16135. +- ATH10K_HTC_SVC_ID_WMI_DATA_VI,
  16136. +- PIPEDIR_IN, /* in = DL = target -> host */
  16137. +- 2,
  16138. +- },
  16139. +- {
  16140. +- ATH10K_HTC_SVC_ID_WMI_CONTROL,
  16141. +- PIPEDIR_OUT, /* out = UL = host -> target */
  16142. +- 3,
  16143. +- },
  16144. +- {
  16145. +- ATH10K_HTC_SVC_ID_WMI_CONTROL,
  16146. +- PIPEDIR_IN, /* in = DL = target -> host */
  16147. +- 2,
  16148. +- },
  16149. +- {
  16150. +- ATH10K_HTC_SVC_ID_RSVD_CTRL,
  16151. +- PIPEDIR_OUT, /* out = UL = host -> target */
  16152. +- 0, /* could be moved to 3 (share with WMI) */
  16153. +- },
  16154. +- {
  16155. +- ATH10K_HTC_SVC_ID_RSVD_CTRL,
  16156. +- PIPEDIR_IN, /* in = DL = target -> host */
  16157. +- 1,
  16158. +- },
  16159. +- {
  16160. +- ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
  16161. +- PIPEDIR_OUT, /* out = UL = host -> target */
  16162. +- 0,
  16163. +- },
  16164. +- {
  16165. +- ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
  16166. +- PIPEDIR_IN, /* in = DL = target -> host */
  16167. +- 1,
  16168. +- },
  16169. +- {
  16170. +- ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
  16171. +- PIPEDIR_OUT, /* out = UL = host -> target */
  16172. +- 4,
  16173. +- },
  16174. +- {
  16175. +- ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
  16176. +- PIPEDIR_IN, /* in = DL = target -> host */
  16177. +- 1,
  16178. +- },
  16179. +-
  16180. +- /* (Additions here) */
  16181. +-
  16182. +- { /* Must be last */
  16183. +- 0,
  16184. +- 0,
  16185. +- 0,
  16186. +- },
  16187. +-};
  16188. +-
  16189. +-/*
  16190. + * Send an interrupt to the device to wake up the Target CPU
  16191. + * so it has an opportunity to notice any changed state.
  16192. + */
  16193. + static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
  16194. + {
  16195. +- int ret;
  16196. +- u32 core_ctrl;
  16197. ++ u32 addr, val;
  16198. +
  16199. +- ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
  16200. +- CORE_CTRL_ADDRESS,
  16201. +- &core_ctrl);
  16202. +- if (ret) {
  16203. +- ath10k_warn("failed to read core_ctrl: %d\n", ret);
  16204. +- return ret;
  16205. +- }
  16206. ++ addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS;
  16207. ++ val = ath10k_pci_read32(ar, addr);
  16208. ++ val |= CORE_CTRL_CPU_INTR_MASK;
  16209. ++ ath10k_pci_write32(ar, addr, val);
  16210. +
  16211. +- /* A_INUM_FIRMWARE interrupt to Target CPU */
  16212. +- core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
  16213. ++ return 0;
  16214. ++}
  16215. +
  16216. +- ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
  16217. +- CORE_CTRL_ADDRESS,
  16218. +- core_ctrl);
  16219. +- if (ret) {
  16220. +- ath10k_warn("failed to set target CPU interrupt mask: %d\n",
  16221. +- ret);
  16222. +- return ret;
  16223. ++static int ath10k_pci_get_num_banks(struct ath10k *ar)
  16224. ++{
  16225. ++ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  16226. ++
  16227. ++ switch (ar_pci->pdev->device) {
  16228. ++ case QCA988X_2_0_DEVICE_ID:
  16229. ++ return 1;
  16230. ++ case QCA6174_2_1_DEVICE_ID:
  16231. ++ switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
  16232. ++ case QCA6174_HW_1_0_CHIP_ID_REV:
  16233. ++ case QCA6174_HW_1_1_CHIP_ID_REV:
  16234. ++ return 3;
  16235. ++ case QCA6174_HW_1_3_CHIP_ID_REV:
  16236. ++ return 2;
  16237. ++ case QCA6174_HW_2_1_CHIP_ID_REV:
  16238. ++ case QCA6174_HW_2_2_CHIP_ID_REV:
  16239. ++ return 6;
  16240. ++ case QCA6174_HW_3_0_CHIP_ID_REV:
  16241. ++ case QCA6174_HW_3_1_CHIP_ID_REV:
  16242. ++ case QCA6174_HW_3_2_CHIP_ID_REV:
  16243. ++ return 9;
  16244. ++ }
  16245. ++ break;
  16246. + }
  16247. +
  16248. +- return 0;
  16249. ++ ath10k_warn(ar, "unknown number of banks, assuming 1\n");
  16250. ++ return 1;
  16251. + }
  16252. +
  16253. + static int ath10k_pci_init_config(struct ath10k *ar)
  16254. +@@ -1593,144 +1560,162 @@ static int ath10k_pci_init_config(struct
  16255. + host_interest_item_address(HI_ITEM(hi_interconnect_state));
  16256. +
  16257. + /* Supply Target-side CE configuration */
  16258. +- ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
  16259. +- &pcie_state_targ_addr);
  16260. ++ ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
  16261. ++ &pcie_state_targ_addr);
  16262. + if (ret != 0) {
  16263. +- ath10k_err("Failed to get pcie state addr: %d\n", ret);
  16264. ++ ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
  16265. + return ret;
  16266. + }
  16267. +
  16268. + if (pcie_state_targ_addr == 0) {
  16269. + ret = -EIO;
  16270. +- ath10k_err("Invalid pcie state addr\n");
  16271. ++ ath10k_err(ar, "Invalid pcie state addr\n");
  16272. + return ret;
  16273. + }
  16274. +
  16275. +- ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
  16276. ++ ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
  16277. + offsetof(struct pcie_state,
  16278. +- pipe_cfg_addr),
  16279. +- &pipe_cfg_targ_addr);
  16280. ++ pipe_cfg_addr)),
  16281. ++ &pipe_cfg_targ_addr);
  16282. + if (ret != 0) {
  16283. +- ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
  16284. ++ ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
  16285. + return ret;
  16286. + }
  16287. +
  16288. + if (pipe_cfg_targ_addr == 0) {
  16289. + ret = -EIO;
  16290. +- ath10k_err("Invalid pipe cfg addr\n");
  16291. ++ ath10k_err(ar, "Invalid pipe cfg addr\n");
  16292. + return ret;
  16293. + }
  16294. +
  16295. + ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
  16296. +- target_ce_config_wlan,
  16297. +- sizeof(target_ce_config_wlan));
  16298. ++ target_ce_config_wlan,
  16299. ++ sizeof(target_ce_config_wlan));
  16300. +
  16301. + if (ret != 0) {
  16302. +- ath10k_err("Failed to write pipe cfg: %d\n", ret);
  16303. ++ ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
  16304. + return ret;
  16305. + }
  16306. +
  16307. +- ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
  16308. ++ ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
  16309. + offsetof(struct pcie_state,
  16310. +- svc_to_pipe_map),
  16311. +- &svc_to_pipe_map);
  16312. ++ svc_to_pipe_map)),
  16313. ++ &svc_to_pipe_map);
  16314. + if (ret != 0) {
  16315. +- ath10k_err("Failed to get svc/pipe map: %d\n", ret);
  16316. ++ ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
  16317. + return ret;
  16318. + }
  16319. +
  16320. + if (svc_to_pipe_map == 0) {
  16321. + ret = -EIO;
  16322. +- ath10k_err("Invalid svc_to_pipe map\n");
  16323. ++ ath10k_err(ar, "Invalid svc_to_pipe map\n");
  16324. + return ret;
  16325. + }
  16326. +
  16327. + ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
  16328. +- target_service_to_ce_map_wlan,
  16329. +- sizeof(target_service_to_ce_map_wlan));
  16330. ++ target_service_to_ce_map_wlan,
  16331. ++ sizeof(target_service_to_ce_map_wlan));
  16332. + if (ret != 0) {
  16333. +- ath10k_err("Failed to write svc/pipe map: %d\n", ret);
  16334. ++ ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
  16335. + return ret;
  16336. + }
  16337. +
  16338. +- ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
  16339. ++ ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
  16340. + offsetof(struct pcie_state,
  16341. +- config_flags),
  16342. +- &pcie_config_flags);
  16343. ++ config_flags)),
  16344. ++ &pcie_config_flags);
  16345. + if (ret != 0) {
  16346. +- ath10k_err("Failed to get pcie config_flags: %d\n", ret);
  16347. ++ ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
  16348. + return ret;
  16349. + }
  16350. +
  16351. + pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
  16352. +
  16353. +- ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
  16354. +- offsetof(struct pcie_state, config_flags),
  16355. +- &pcie_config_flags,
  16356. +- sizeof(pcie_config_flags));
  16357. ++ ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
  16358. ++ offsetof(struct pcie_state,
  16359. ++ config_flags)),
  16360. ++ pcie_config_flags);
  16361. + if (ret != 0) {
  16362. +- ath10k_err("Failed to write pcie config_flags: %d\n", ret);
  16363. ++ ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
  16364. + return ret;
  16365. + }
  16366. +
  16367. + /* configure early allocation */
  16368. + ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
  16369. +
  16370. +- ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
  16371. ++ ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
  16372. + if (ret != 0) {
  16373. +- ath10k_err("Faile to get early alloc val: %d\n", ret);
  16374. ++ ath10k_err(ar, "Faile to get early alloc val: %d\n", ret);
  16375. + return ret;
  16376. + }
  16377. +
  16378. + /* first bank is switched to IRAM */
  16379. + ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
  16380. + HI_EARLY_ALLOC_MAGIC_MASK);
  16381. +- ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
  16382. ++ ealloc_value |= ((ath10k_pci_get_num_banks(ar) <<
  16383. ++ HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
  16384. + HI_EARLY_ALLOC_IRAM_BANKS_MASK);
  16385. +
  16386. +- ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
  16387. ++ ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
  16388. + if (ret != 0) {
  16389. +- ath10k_err("Failed to set early alloc val: %d\n", ret);
  16390. ++ ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
  16391. + return ret;
  16392. + }
  16393. +
  16394. + /* Tell Target to proceed with initialization */
  16395. + flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
  16396. +
  16397. +- ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
  16398. ++ ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
  16399. + if (ret != 0) {
  16400. +- ath10k_err("Failed to get option val: %d\n", ret);
  16401. ++ ath10k_err(ar, "Failed to get option val: %d\n", ret);
  16402. + return ret;
  16403. + }
  16404. +
  16405. + flag2_value |= HI_OPTION_EARLY_CFG_DONE;
  16406. +
  16407. +- ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
  16408. ++ ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
  16409. + if (ret != 0) {
  16410. +- ath10k_err("Failed to set option val: %d\n", ret);
  16411. ++ ath10k_err(ar, "Failed to set option val: %d\n", ret);
  16412. + return ret;
  16413. + }
  16414. +
  16415. + return 0;
  16416. + }
  16417. +
  16418. +-static int ath10k_pci_alloc_ce(struct ath10k *ar)
  16419. ++static int ath10k_pci_alloc_pipes(struct ath10k *ar)
  16420. + {
  16421. ++ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  16422. ++ struct ath10k_pci_pipe *pipe;
  16423. + int i, ret;
  16424. +
  16425. + for (i = 0; i < CE_COUNT; i++) {
  16426. +- ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
  16427. ++ pipe = &ar_pci->pipe_info[i];
  16428. ++ pipe->ce_hdl = &ar_pci->ce_states[i];
  16429. ++ pipe->pipe_num = i;
  16430. ++ pipe->hif_ce_state = ar;
  16431. ++
  16432. ++ ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i],
  16433. ++ ath10k_pci_ce_send_done,
  16434. ++ ath10k_pci_ce_recv_data);
  16435. + if (ret) {
  16436. +- ath10k_err("failed to allocate copy engine pipe %d: %d\n",
  16437. ++ ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
  16438. + i, ret);
  16439. + return ret;
  16440. + }
  16441. ++
  16442. ++ /* Last CE is Diagnostic Window */
  16443. ++ if (i == CE_COUNT - 1) {
  16444. ++ ar_pci->ce_diag = pipe->ce_hdl;
  16445. ++ continue;
  16446. ++ }
  16447. ++
  16448. ++ pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
  16449. + }
  16450. +
  16451. + return 0;
  16452. + }
  16453. +
  16454. +-static void ath10k_pci_free_ce(struct ath10k *ar)
  16455. ++static void ath10k_pci_free_pipes(struct ath10k *ar)
  16456. + {
  16457. + int i;
  16458. +
  16459. +@@ -1738,305 +1723,319 @@ static void ath10k_pci_free_ce(struct at
  16460. + ath10k_ce_free_pipe(ar, i);
  16461. + }
  16462. +
  16463. +-static int ath10k_pci_ce_init(struct ath10k *ar)
  16464. ++static int ath10k_pci_init_pipes(struct ath10k *ar)
  16465. + {
  16466. +- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  16467. +- struct ath10k_pci_pipe *pipe_info;
  16468. +- const struct ce_attr *attr;
  16469. +- int pipe_num, ret;
  16470. +-
  16471. +- for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
  16472. +- pipe_info = &ar_pci->pipe_info[pipe_num];
  16473. +- pipe_info->ce_hdl = &ar_pci->ce_states[pipe_num];
  16474. +- pipe_info->pipe_num = pipe_num;
  16475. +- pipe_info->hif_ce_state = ar;
  16476. +- attr = &host_ce_config_wlan[pipe_num];
  16477. ++ int i, ret;
  16478. +
  16479. +- ret = ath10k_ce_init_pipe(ar, pipe_num, attr);
  16480. ++ for (i = 0; i < CE_COUNT; i++) {
  16481. ++ ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
  16482. + if (ret) {
  16483. +- ath10k_err("failed to initialize copy engine pipe %d: %d\n",
  16484. +- pipe_num, ret);
  16485. ++ ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
  16486. ++ i, ret);
  16487. + return ret;
  16488. + }
  16489. +-
  16490. +- if (pipe_num == CE_COUNT - 1) {
  16491. +- /*
  16492. +- * Reserve the ultimate CE for
  16493. +- * diagnostic Window support
  16494. +- */
  16495. +- ar_pci->ce_diag = pipe_info->ce_hdl;
  16496. +- continue;
  16497. +- }
  16498. +-
  16499. +- pipe_info->buf_sz = (size_t) (attr->src_sz_max);
  16500. + }
  16501. +
  16502. + return 0;
  16503. + }
  16504. +
  16505. +-static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
  16506. ++static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
  16507. + {
  16508. +- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  16509. +- u32 fw_indicator;
  16510. +-
  16511. +- ath10k_pci_wake(ar);
  16512. +-
  16513. +- fw_indicator = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
  16514. +-
  16515. +- if (fw_indicator & FW_IND_EVENT_PENDING) {
  16516. +- /* ACK: clear Target-side pending event */
  16517. +- ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
  16518. +- fw_indicator & ~FW_IND_EVENT_PENDING);
  16519. +-
  16520. +- if (ar_pci->started) {
  16521. +- ath10k_pci_hif_dump_area(ar);
  16522. +- } else {
  16523. +- /*
  16524. +- * Probable Target failure before we're prepared
  16525. +- * to handle it. Generally unexpected.
  16526. +- */
  16527. +- ath10k_warn("early firmware event indicated\n");
  16528. +- }
  16529. +- }
  16530. +-
  16531. +- ath10k_pci_sleep(ar);
  16532. ++ return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
  16533. ++ FW_IND_EVENT_PENDING;
  16534. + }
  16535. +
  16536. +-static int ath10k_pci_warm_reset(struct ath10k *ar)
  16537. ++static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
  16538. + {
  16539. +- int ret = 0;
  16540. + u32 val;
  16541. +
  16542. +- ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset\n");
  16543. ++ val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
  16544. ++ val &= ~FW_IND_EVENT_PENDING;
  16545. ++ ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
  16546. ++}
  16547. +
  16548. +- ret = ath10k_do_pci_wake(ar);
  16549. +- if (ret) {
  16550. +- ath10k_err("failed to wake up target: %d\n", ret);
  16551. +- return ret;
  16552. +- }
  16553. ++/* this function effectively clears target memory controller assert line */
  16554. ++static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
  16555. ++{
  16556. ++ u32 val;
  16557. +
  16558. +- /* debug */
  16559. +- val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
  16560. +- PCIE_INTR_CAUSE_ADDRESS);
  16561. +- ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
  16562. ++ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
  16563. ++ ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
  16564. ++ val | SOC_RESET_CONTROL_SI0_RST_MASK);
  16565. ++ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
  16566. +
  16567. +- val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
  16568. +- CPU_INTR_ADDRESS);
  16569. +- ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
  16570. +- val);
  16571. ++ msleep(10);
  16572. +
  16573. +- /* disable pending irqs */
  16574. +- ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
  16575. +- PCIE_INTR_ENABLE_ADDRESS, 0);
  16576. ++ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
  16577. ++ ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
  16578. ++ val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
  16579. ++ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
  16580. +
  16581. +- ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
  16582. +- PCIE_INTR_CLR_ADDRESS, ~0);
  16583. ++ msleep(10);
  16584. ++}
  16585. +
  16586. +- msleep(100);
  16587. ++static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
  16588. ++{
  16589. ++ u32 val;
  16590. +
  16591. +- /* clear fw indicator */
  16592. + ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
  16593. +
  16594. +- /* clear target LF timer interrupts */
  16595. + val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
  16596. +- SOC_LF_TIMER_CONTROL0_ADDRESS);
  16597. +- ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
  16598. +- SOC_LF_TIMER_CONTROL0_ADDRESS,
  16599. +- val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
  16600. ++ SOC_RESET_CONTROL_ADDRESS);
  16601. ++ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
  16602. ++ val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
  16603. ++}
  16604. ++
  16605. ++static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
  16606. ++{
  16607. ++ u32 val;
  16608. +
  16609. +- /* reset CE */
  16610. + val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
  16611. + SOC_RESET_CONTROL_ADDRESS);
  16612. ++
  16613. + ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
  16614. + val | SOC_RESET_CONTROL_CE_RST_MASK);
  16615. +- val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
  16616. +- SOC_RESET_CONTROL_ADDRESS);
  16617. + msleep(10);
  16618. +-
  16619. +- /* unreset CE */
  16620. + ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
  16621. + val & ~SOC_RESET_CONTROL_CE_RST_MASK);
  16622. +- val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
  16623. +- SOC_RESET_CONTROL_ADDRESS);
  16624. +- msleep(10);
  16625. ++}
  16626. +
  16627. +- /* debug */
  16628. +- val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
  16629. +- PCIE_INTR_CAUSE_ADDRESS);
  16630. +- ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
  16631. +-
  16632. +- val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
  16633. +- CPU_INTR_ADDRESS);
  16634. +- ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
  16635. +- val);
  16636. ++static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
  16637. ++{
  16638. ++ u32 val;
  16639. +
  16640. +- /* CPU warm reset */
  16641. + val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
  16642. +- SOC_RESET_CONTROL_ADDRESS);
  16643. +- ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
  16644. +- val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
  16645. ++ SOC_LF_TIMER_CONTROL0_ADDRESS);
  16646. ++ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
  16647. ++ SOC_LF_TIMER_CONTROL0_ADDRESS,
  16648. ++ val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
  16649. ++}
  16650. +
  16651. +- val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
  16652. +- SOC_RESET_CONTROL_ADDRESS);
  16653. +- ath10k_dbg(ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", val);
  16654. ++static int ath10k_pci_warm_reset(struct ath10k *ar)
  16655. ++{
  16656. ++ int ret;
  16657. +
  16658. +- msleep(100);
  16659. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
  16660. +
  16661. +- ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset complete\n");
  16662. ++ spin_lock_bh(&ar->data_lock);
  16663. ++ ar->stats.fw_warm_reset_counter++;
  16664. ++ spin_unlock_bh(&ar->data_lock);
  16665. ++
  16666. ++ ath10k_pci_irq_disable(ar);
  16667. ++
  16668. ++ /* Make sure the target CPU is not doing anything dangerous, e.g. if it
  16669. ++ * were to access copy engine while host performs copy engine reset
  16670. ++ * then it is possible for the device to confuse pci-e controller to
  16671. ++ * the point of bringing host system to a complete stop (i.e. hang).
  16672. ++ */
  16673. ++ ath10k_pci_warm_reset_si0(ar);
  16674. ++ ath10k_pci_warm_reset_cpu(ar);
  16675. ++ ath10k_pci_init_pipes(ar);
  16676. ++ ath10k_pci_wait_for_target_init(ar);
  16677. ++
  16678. ++ ath10k_pci_warm_reset_clear_lf(ar);
  16679. ++ ath10k_pci_warm_reset_ce(ar);
  16680. ++ ath10k_pci_warm_reset_cpu(ar);
  16681. ++ ath10k_pci_init_pipes(ar);
  16682. +
  16683. +- ath10k_do_pci_sleep(ar);
  16684. +- return ret;
  16685. ++ ret = ath10k_pci_wait_for_target_init(ar);
  16686. ++ if (ret) {
  16687. ++ ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
  16688. ++ return ret;
  16689. ++ }
  16690. ++
  16691. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
  16692. ++
  16693. ++ return 0;
  16694. + }
  16695. +
  16696. +-static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
  16697. ++static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
  16698. + {
  16699. +- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  16700. +- const char *irq_mode;
  16701. +- int ret;
  16702. ++ int i, ret;
  16703. ++ u32 val;
  16704. +
  16705. +- /*
  16706. +- * Bring the target up cleanly.
  16707. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");
  16708. ++
  16709. ++ /* Some hardware revisions (e.g. CUS223v2) has issues with cold reset.
  16710. ++ * It is thus preferred to use warm reset which is safer but may not be
  16711. ++ * able to recover the device from all possible fail scenarios.
  16712. + *
  16713. +- * The target may be in an undefined state with an AUX-powered Target
  16714. +- * and a Host in WoW mode. If the Host crashes, loses power, or is
  16715. +- * restarted (without unloading the driver) then the Target is left
  16716. +- * (aux) powered and running. On a subsequent driver load, the Target
  16717. +- * is in an unexpected state. We try to catch that here in order to
  16718. +- * reset the Target and retry the probe.
  16719. ++ * Warm reset doesn't always work on first try so attempt it a few
  16720. ++ * times before giving up.
  16721. + */
  16722. +- if (cold_reset)
  16723. +- ret = ath10k_pci_cold_reset(ar);
  16724. +- else
  16725. ++ for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
  16726. + ret = ath10k_pci_warm_reset(ar);
  16727. ++ if (ret) {
  16728. ++ ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
  16729. ++ i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
  16730. ++ ret);
  16731. ++ continue;
  16732. ++ }
  16733. ++
  16734. ++ /* FIXME: Sometimes copy engine doesn't recover after warm
  16735. ++ * reset. In most cases this needs cold reset. In some of these
  16736. ++ * cases the device is in such a state that a cold reset may
  16737. ++ * lock up the host.
  16738. ++ *
  16739. ++ * Reading any host interest register via copy engine is
  16740. ++ * sufficient to verify if device is capable of booting
  16741. ++ * firmware blob.
  16742. ++ */
  16743. ++ ret = ath10k_pci_init_pipes(ar);
  16744. ++ if (ret) {
  16745. ++ ath10k_warn(ar, "failed to init copy engine: %d\n",
  16746. ++ ret);
  16747. ++ continue;
  16748. ++ }
  16749. ++
  16750. ++ ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
  16751. ++ &val);
  16752. ++ if (ret) {
  16753. ++ ath10k_warn(ar, "failed to poke copy engine: %d\n",
  16754. ++ ret);
  16755. ++ continue;
  16756. ++ }
  16757. ++
  16758. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
  16759. ++ return 0;
  16760. ++ }
  16761. +
  16762. ++ if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
  16763. ++ ath10k_warn(ar, "refusing cold reset as requested\n");
  16764. ++ return -EPERM;
  16765. ++ }
  16766. ++
  16767. ++ ret = ath10k_pci_cold_reset(ar);
  16768. + if (ret) {
  16769. +- ath10k_err("failed to reset target: %d\n", ret);
  16770. +- goto err;
  16771. ++ ath10k_warn(ar, "failed to cold reset: %d\n", ret);
  16772. ++ return ret;
  16773. ++ }
  16774. ++
  16775. ++ ret = ath10k_pci_wait_for_target_init(ar);
  16776. ++ if (ret) {
  16777. ++ ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
  16778. ++ ret);
  16779. ++ return ret;
  16780. + }
  16781. +
  16782. +- if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
  16783. +- /* Force AWAKE forever */
  16784. +- ath10k_do_pci_wake(ar);
  16785. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");
  16786. +
  16787. +- ret = ath10k_pci_ce_init(ar);
  16788. ++ return 0;
  16789. ++}
  16790. ++
  16791. ++static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
  16792. ++{
  16793. ++ int ret;
  16794. ++
  16795. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");
  16796. ++
  16797. ++ /* FIXME: QCA6174 requires cold + warm reset to work. */
  16798. ++
  16799. ++ ret = ath10k_pci_cold_reset(ar);
  16800. + if (ret) {
  16801. +- ath10k_err("failed to initialize CE: %d\n", ret);
  16802. +- goto err_ps;
  16803. ++ ath10k_warn(ar, "failed to cold reset: %d\n", ret);
  16804. ++ return ret;
  16805. + }
  16806. +
  16807. +- ret = ath10k_ce_disable_interrupts(ar);
  16808. ++ ret = ath10k_pci_wait_for_target_init(ar);
  16809. + if (ret) {
  16810. +- ath10k_err("failed to disable CE interrupts: %d\n", ret);
  16811. +- goto err_ce;
  16812. ++ ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
  16813. ++ ret);
  16814. ++ return ret;
  16815. + }
  16816. +
  16817. +- ret = ath10k_pci_init_irq(ar);
  16818. ++ ret = ath10k_pci_warm_reset(ar);
  16819. ++ if (ret) {
  16820. ++ ath10k_warn(ar, "failed to warm reset: %d\n", ret);
  16821. ++ return ret;
  16822. ++ }
  16823. ++
  16824. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");
  16825. ++
  16826. ++ return 0;
  16827. ++}
  16828. ++
  16829. ++static int ath10k_pci_chip_reset(struct ath10k *ar)
  16830. ++{
  16831. ++ if (QCA_REV_988X(ar))
  16832. ++ return ath10k_pci_qca988x_chip_reset(ar);
  16833. ++ else if (QCA_REV_6174(ar))
  16834. ++ return ath10k_pci_qca6174_chip_reset(ar);
  16835. ++ else
  16836. ++ return -ENOTSUPP;
  16837. ++}
  16838. ++
  16839. ++static int ath10k_pci_hif_power_up(struct ath10k *ar)
  16840. ++{
  16841. ++ int ret;
  16842. ++
  16843. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
  16844. ++
  16845. ++ ret = ath10k_pci_wake(ar);
  16846. ++ if (ret) {
  16847. ++ ath10k_err(ar, "failed to wake up target: %d\n", ret);
  16848. ++ return ret;
  16849. ++ }
  16850. ++
  16851. ++ /*
  16852. ++ * Bring the target up cleanly.
  16853. ++ *
  16854. ++ * The target may be in an undefined state with an AUX-powered Target
  16855. ++ * and a Host in WoW mode. If the Host crashes, loses power, or is
  16856. ++ * restarted (without unloading the driver) then the Target is left
  16857. ++ * (aux) powered and running. On a subsequent driver load, the Target
  16858. ++ * is in an unexpected state. We try to catch that here in order to
  16859. ++ * reset the Target and retry the probe.
  16860. ++ */
  16861. ++ ret = ath10k_pci_chip_reset(ar);
  16862. + if (ret) {
  16863. +- ath10k_err("failed to init irqs: %d\n", ret);
  16864. +- goto err_ce;
  16865. +- }
  16866. ++ if (ath10k_pci_has_fw_crashed(ar)) {
  16867. ++ ath10k_warn(ar, "firmware crashed during chip reset\n");
  16868. ++ ath10k_pci_fw_crashed_clear(ar);
  16869. ++ ath10k_pci_fw_crashed_dump(ar);
  16870. ++ }
  16871. +
  16872. +- ret = ath10k_pci_request_early_irq(ar);
  16873. +- if (ret) {
  16874. +- ath10k_err("failed to request early irq: %d\n", ret);
  16875. +- goto err_deinit_irq;
  16876. ++ ath10k_err(ar, "failed to reset chip: %d\n", ret);
  16877. ++ goto err_sleep;
  16878. + }
  16879. +
  16880. +- ret = ath10k_pci_wait_for_target_init(ar);
  16881. ++ ret = ath10k_pci_init_pipes(ar);
  16882. + if (ret) {
  16883. +- ath10k_err("failed to wait for target to init: %d\n", ret);
  16884. +- goto err_free_early_irq;
  16885. ++ ath10k_err(ar, "failed to initialize CE: %d\n", ret);
  16886. ++ goto err_sleep;
  16887. + }
  16888. +
  16889. + ret = ath10k_pci_init_config(ar);
  16890. + if (ret) {
  16891. +- ath10k_err("failed to setup init config: %d\n", ret);
  16892. +- goto err_free_early_irq;
  16893. ++ ath10k_err(ar, "failed to setup init config: %d\n", ret);
  16894. ++ goto err_ce;
  16895. + }
  16896. +
  16897. + ret = ath10k_pci_wake_target_cpu(ar);
  16898. + if (ret) {
  16899. +- ath10k_err("could not wake up target CPU: %d\n", ret);
  16900. +- goto err_free_early_irq;
  16901. ++ ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
  16902. ++ goto err_ce;
  16903. + }
  16904. +
  16905. +- if (ar_pci->num_msi_intrs > 1)
  16906. +- irq_mode = "MSI-X";
  16907. +- else if (ar_pci->num_msi_intrs == 1)
  16908. +- irq_mode = "MSI";
  16909. +- else
  16910. +- irq_mode = "legacy";
  16911. +-
  16912. +- if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
  16913. +- ath10k_info("pci irq %s irq_mode %d reset_mode %d\n",
  16914. +- irq_mode, ath10k_pci_irq_mode,
  16915. +- ath10k_pci_reset_mode);
  16916. +-
  16917. + return 0;
  16918. +
  16919. +-err_free_early_irq:
  16920. +- ath10k_pci_free_early_irq(ar);
  16921. +-err_deinit_irq:
  16922. +- ath10k_pci_deinit_irq(ar);
  16923. + err_ce:
  16924. + ath10k_pci_ce_deinit(ar);
  16925. +- ath10k_pci_warm_reset(ar);
  16926. +-err_ps:
  16927. +- if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
  16928. +- ath10k_do_pci_sleep(ar);
  16929. +-err:
  16930. +- return ret;
  16931. +-}
  16932. +-
  16933. +-static int ath10k_pci_hif_power_up(struct ath10k *ar)
  16934. +-{
  16935. +- int ret;
  16936. +-
  16937. +- ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power up\n");
  16938. +-
  16939. +- /*
  16940. +- * Hardware CUS232 version 2 has some issues with cold reset and the
  16941. +- * preferred (and safer) way to perform a device reset is through a
  16942. +- * warm reset.
  16943. +- *
  16944. +- * Warm reset doesn't always work though (notably after a firmware
  16945. +- * crash) so fall back to cold reset if necessary.
  16946. +- */
  16947. +- ret = __ath10k_pci_hif_power_up(ar, false);
  16948. +- if (ret) {
  16949. +- ath10k_warn("failed to power up target using warm reset: %d\n",
  16950. +- ret);
  16951. +-
  16952. +- if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY)
  16953. +- return ret;
  16954. +
  16955. +- ath10k_warn("trying cold reset\n");
  16956. +-
  16957. +- ret = __ath10k_pci_hif_power_up(ar, true);
  16958. +- if (ret) {
  16959. +- ath10k_err("failed to power up target using cold reset too (%d)\n",
  16960. +- ret);
  16961. +- return ret;
  16962. +- }
  16963. +- }
  16964. +-
  16965. +- return 0;
  16966. ++err_sleep:
  16967. ++ ath10k_pci_sleep(ar);
  16968. ++ return ret;
  16969. + }
  16970. +
  16971. + static void ath10k_pci_hif_power_down(struct ath10k *ar)
  16972. + {
  16973. +- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  16974. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
  16975. +
  16976. +- ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power down\n");
  16977. +-
  16978. +- ath10k_pci_free_early_irq(ar);
  16979. +- ath10k_pci_kill_tasklet(ar);
  16980. +- ath10k_pci_deinit_irq(ar);
  16981. +- ath10k_pci_ce_deinit(ar);
  16982. +- ath10k_pci_warm_reset(ar);
  16983. ++ /* Currently hif_power_up performs effectively a reset and hif_stop
  16984. ++ * resets the chip as well so there's no point in resetting here.
  16985. ++ */
  16986. +
  16987. +- if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
  16988. +- ath10k_do_pci_sleep(ar);
  16989. ++ ath10k_pci_sleep(ar);
  16990. + }
  16991. +
  16992. + #ifdef CONFIG_PM
  16993. +@@ -2090,6 +2089,8 @@ static int ath10k_pci_hif_resume(struct
  16994. +
  16995. + static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
  16996. + .tx_sg = ath10k_pci_hif_tx_sg,
  16997. ++ .diag_read = ath10k_pci_hif_diag_read,
  16998. ++ .diag_write = ath10k_pci_diag_write_mem,
  16999. + .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
  17000. + .start = ath10k_pci_hif_start,
  17001. + .stop = ath10k_pci_hif_stop,
  17002. +@@ -2100,6 +2101,8 @@ static const struct ath10k_hif_ops ath10
  17003. + .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
  17004. + .power_up = ath10k_pci_hif_power_up,
  17005. + .power_down = ath10k_pci_hif_power_down,
  17006. ++ .read32 = ath10k_pci_read32,
  17007. ++ .write32 = ath10k_pci_write32,
  17008. + #ifdef CONFIG_PM
  17009. + .suspend = ath10k_pci_hif_suspend,
  17010. + .resume = ath10k_pci_hif_resume,
  17011. +@@ -2118,7 +2121,14 @@ static void ath10k_msi_err_tasklet(unsig
  17012. + {
  17013. + struct ath10k *ar = (struct ath10k *)data;
  17014. +
  17015. +- ath10k_pci_fw_interrupt_handler(ar);
  17016. ++ if (!ath10k_pci_has_fw_crashed(ar)) {
  17017. ++ ath10k_warn(ar, "received unsolicited fw crash interrupt\n");
  17018. ++ return;
  17019. ++ }
  17020. ++
  17021. ++ ath10k_pci_irq_disable(ar);
  17022. ++ ath10k_pci_fw_crashed_clear(ar);
  17023. ++ ath10k_pci_fw_crashed_dump(ar);
  17024. + }
  17025. +
  17026. + /*
  17027. +@@ -2132,7 +2142,8 @@ static irqreturn_t ath10k_pci_per_engine
  17028. + int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
  17029. +
  17030. + if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
  17031. +- ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
  17032. ++ ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
  17033. ++ ce_id);
  17034. + return IRQ_HANDLED;
  17035. + }
  17036. +
  17037. +@@ -2179,39 +2190,18 @@ static irqreturn_t ath10k_pci_interrupt_
  17038. + return IRQ_HANDLED;
  17039. + }
  17040. +
  17041. +-static void ath10k_pci_early_irq_tasklet(unsigned long data)
  17042. ++static void ath10k_pci_tasklet(unsigned long data)
  17043. + {
  17044. + struct ath10k *ar = (struct ath10k *)data;
  17045. +- u32 fw_ind;
  17046. +- int ret;
  17047. ++ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  17048. +
  17049. +- ret = ath10k_pci_wake(ar);
  17050. +- if (ret) {
  17051. +- ath10k_warn("failed to wake target in early irq tasklet: %d\n",
  17052. +- ret);
  17053. ++ if (ath10k_pci_has_fw_crashed(ar)) {
  17054. ++ ath10k_pci_irq_disable(ar);
  17055. ++ ath10k_pci_fw_crashed_clear(ar);
  17056. ++ ath10k_pci_fw_crashed_dump(ar);
  17057. + return;
  17058. + }
  17059. +
  17060. +- fw_ind = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
  17061. +- if (fw_ind & FW_IND_EVENT_PENDING) {
  17062. +- ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
  17063. +- fw_ind & ~FW_IND_EVENT_PENDING);
  17064. +-
  17065. +- /* Some structures are unavailable during early boot or at
  17066. +- * driver teardown so just print that the device has crashed. */
  17067. +- ath10k_warn("device crashed - no diagnostics available\n");
  17068. +- }
  17069. +-
  17070. +- ath10k_pci_sleep(ar);
  17071. +- ath10k_pci_enable_legacy_irq(ar);
  17072. +-}
  17073. +-
  17074. +-static void ath10k_pci_tasklet(unsigned long data)
  17075. +-{
  17076. +- struct ath10k *ar = (struct ath10k *)data;
  17077. +- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  17078. +-
  17079. +- ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
  17080. + ath10k_ce_per_engine_service_any(ar);
  17081. +
  17082. + /* Re-enable legacy irq that was disabled in the irq handler */
  17083. +@@ -2228,7 +2218,7 @@ static int ath10k_pci_request_irq_msix(s
  17084. + ath10k_pci_msi_fw_handler,
  17085. + IRQF_SHARED, "ath10k_pci", ar);
  17086. + if (ret) {
  17087. +- ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
  17088. ++ ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n",
  17089. + ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
  17090. + return ret;
  17091. + }
  17092. +@@ -2238,7 +2228,7 @@ static int ath10k_pci_request_irq_msix(s
  17093. + ath10k_pci_per_engine_handler,
  17094. + IRQF_SHARED, "ath10k_pci", ar);
  17095. + if (ret) {
  17096. +- ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
  17097. ++ ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n",
  17098. + ar_pci->pdev->irq + i, ret);
  17099. +
  17100. + for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
  17101. +@@ -2261,7 +2251,7 @@ static int ath10k_pci_request_irq_msi(st
  17102. + ath10k_pci_interrupt_handler,
  17103. + IRQF_SHARED, "ath10k_pci", ar);
  17104. + if (ret) {
  17105. +- ath10k_warn("failed to request MSI irq %d: %d\n",
  17106. ++ ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
  17107. + ar_pci->pdev->irq, ret);
  17108. + return ret;
  17109. + }
  17110. +@@ -2278,7 +2268,7 @@ static int ath10k_pci_request_irq_legacy
  17111. + ath10k_pci_interrupt_handler,
  17112. + IRQF_SHARED, "ath10k_pci", ar);
  17113. + if (ret) {
  17114. +- ath10k_warn("failed to request legacy irq %d: %d\n",
  17115. ++ ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
  17116. + ar_pci->pdev->irq, ret);
  17117. + return ret;
  17118. + }
  17119. +@@ -2299,7 +2289,7 @@ static int ath10k_pci_request_irq(struct
  17120. + return ath10k_pci_request_irq_msix(ar);
  17121. + }
  17122. +
  17123. +- ath10k_warn("unknown irq configuration upon request\n");
  17124. ++ ath10k_warn(ar, "unknown irq configuration upon request\n");
  17125. + return -EINVAL;
  17126. + }
  17127. +
  17128. +@@ -2322,8 +2312,6 @@ static void ath10k_pci_init_irq_tasklets
  17129. + tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
  17130. + tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
  17131. + (unsigned long)ar);
  17132. +- tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet,
  17133. +- (unsigned long)ar);
  17134. +
  17135. + for (i = 0; i < CE_COUNT; i++) {
  17136. + ar_pci->pipe_info[i].ar_pci = ar_pci;
  17137. +@@ -2335,21 +2323,19 @@ static void ath10k_pci_init_irq_tasklets
  17138. + static int ath10k_pci_init_irq(struct ath10k *ar)
  17139. + {
  17140. + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  17141. +- bool msix_supported = test_bit(ATH10K_PCI_FEATURE_MSI_X,
  17142. +- ar_pci->features);
  17143. + int ret;
  17144. +
  17145. + ath10k_pci_init_irq_tasklets(ar);
  17146. +
  17147. +- if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO &&
  17148. +- !test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
  17149. +- ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode);
  17150. ++ if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
  17151. ++ ath10k_info(ar, "limiting irq mode to: %d\n",
  17152. ++ ath10k_pci_irq_mode);
  17153. +
  17154. + /* Try MSI-X */
  17155. +- if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) {
  17156. ++ if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) {
  17157. + ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
  17158. + ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
  17159. +- ar_pci->num_msi_intrs);
  17160. ++ ar_pci->num_msi_intrs);
  17161. + if (ret > 0)
  17162. + return 0;
  17163. +
  17164. +@@ -2376,34 +2362,16 @@ static int ath10k_pci_init_irq(struct at
  17165. + * synchronization checking. */
  17166. + ar_pci->num_msi_intrs = 0;
  17167. +
  17168. +- ret = ath10k_pci_wake(ar);
  17169. +- if (ret) {
  17170. +- ath10k_warn("failed to wake target: %d\n", ret);
  17171. +- return ret;
  17172. +- }
  17173. +-
  17174. + ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
  17175. + PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
  17176. +- ath10k_pci_sleep(ar);
  17177. +
  17178. + return 0;
  17179. + }
  17180. +
  17181. +-static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
  17182. ++static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
  17183. + {
  17184. +- int ret;
  17185. +-
  17186. +- ret = ath10k_pci_wake(ar);
  17187. +- if (ret) {
  17188. +- ath10k_warn("failed to wake target: %d\n", ret);
  17189. +- return ret;
  17190. +- }
  17191. +-
  17192. + ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
  17193. + 0);
  17194. +- ath10k_pci_sleep(ar);
  17195. +-
  17196. +- return 0;
  17197. + }
  17198. +
  17199. + static int ath10k_pci_deinit_irq(struct ath10k *ar)
  17200. +@@ -2412,7 +2380,8 @@ static int ath10k_pci_deinit_irq(struct
  17201. +
  17202. + switch (ar_pci->num_msi_intrs) {
  17203. + case 0:
  17204. +- return ath10k_pci_deinit_irq_legacy(ar);
  17205. ++ ath10k_pci_deinit_irq_legacy(ar);
  17206. ++ return 0;
  17207. + case 1:
  17208. + /* fall-through */
  17209. + case MSI_NUM_REQUEST:
  17210. +@@ -2422,7 +2391,7 @@ static int ath10k_pci_deinit_irq(struct
  17211. + pci_disable_msi(ar_pci->pdev);
  17212. + }
  17213. +
  17214. +- ath10k_warn("unknown irq configuration upon deinit\n");
  17215. ++ ath10k_warn(ar, "unknown irq configuration upon deinit\n");
  17216. + return -EINVAL;
  17217. + }
  17218. +
  17219. +@@ -2430,23 +2399,17 @@ static int ath10k_pci_wait_for_target_in
  17220. + {
  17221. + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  17222. + unsigned long timeout;
  17223. +- int ret;
  17224. + u32 val;
  17225. +
  17226. +- ath10k_dbg(ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
  17227. +-
  17228. +- ret = ath10k_pci_wake(ar);
  17229. +- if (ret) {
  17230. +- ath10k_err("failed to wake up target for init: %d\n", ret);
  17231. +- return ret;
  17232. +- }
  17233. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
  17234. +
  17235. + timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
  17236. +
  17237. + do {
  17238. + val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
  17239. +
  17240. +- ath10k_dbg(ATH10K_DBG_BOOT, "boot target indicator %x\n", val);
  17241. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
  17242. ++ val);
  17243. +
  17244. + /* target should never return this */
  17245. + if (val == 0xffffffff)
  17246. +@@ -2461,52 +2424,46 @@ static int ath10k_pci_wait_for_target_in
  17247. +
  17248. + if (ar_pci->num_msi_intrs == 0)
  17249. + /* Fix potential race by repeating CORE_BASE writes */
  17250. +- ath10k_pci_soc_write32(ar, PCIE_INTR_ENABLE_ADDRESS,
  17251. +- PCIE_INTR_FIRMWARE_MASK |
  17252. +- PCIE_INTR_CE_MASK_ALL);
  17253. ++ ath10k_pci_enable_legacy_irq(ar);
  17254. +
  17255. + mdelay(10);
  17256. + } while (time_before(jiffies, timeout));
  17257. +
  17258. ++ ath10k_pci_disable_and_clear_legacy_irq(ar);
  17259. ++ ath10k_pci_irq_msi_fw_mask(ar);
  17260. ++
  17261. + if (val == 0xffffffff) {
  17262. +- ath10k_err("failed to read device register, device is gone\n");
  17263. +- ret = -EIO;
  17264. +- goto out;
  17265. ++ ath10k_err(ar, "failed to read device register, device is gone\n");
  17266. ++ return -EIO;
  17267. + }
  17268. +
  17269. + if (val & FW_IND_EVENT_PENDING) {
  17270. +- ath10k_warn("device has crashed during init\n");
  17271. +- ret = -ECOMM;
  17272. +- goto out;
  17273. ++ ath10k_warn(ar, "device has crashed during init\n");
  17274. ++ return -ECOMM;
  17275. + }
  17276. +
  17277. + if (!(val & FW_IND_INITIALIZED)) {
  17278. +- ath10k_err("failed to receive initialized event from target: %08x\n",
  17279. ++ ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
  17280. + val);
  17281. +- ret = -ETIMEDOUT;
  17282. +- goto out;
  17283. ++ return -ETIMEDOUT;
  17284. + }
  17285. +
  17286. +- ath10k_dbg(ATH10K_DBG_BOOT, "boot target initialised\n");
  17287. +-
  17288. +-out:
  17289. +- ath10k_pci_sleep(ar);
  17290. +- return ret;
  17291. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
  17292. ++ return 0;
  17293. + }
  17294. +
  17295. + static int ath10k_pci_cold_reset(struct ath10k *ar)
  17296. + {
  17297. +- int i, ret;
  17298. ++ int i;
  17299. + u32 val;
  17300. +
  17301. +- ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset\n");
  17302. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
  17303. +
  17304. +- ret = ath10k_do_pci_wake(ar);
  17305. +- if (ret) {
  17306. +- ath10k_err("failed to wake up target: %d\n",
  17307. +- ret);
  17308. +- return ret;
  17309. +- }
  17310. ++ spin_lock_bh(&ar->data_lock);
  17311. ++
  17312. ++ ar->stats.fw_cold_reset_counter++;
  17313. ++
  17314. ++ spin_unlock_bh(&ar->data_lock);
  17315. +
  17316. + /* Put Target, including PCIe, into RESET. */
  17317. + val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
  17318. +@@ -2531,181 +2488,227 @@ static int ath10k_pci_cold_reset(struct
  17319. + msleep(1);
  17320. + }
  17321. +
  17322. +- ath10k_do_pci_sleep(ar);
  17323. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
  17324. ++
  17325. ++ return 0;
  17326. ++}
  17327. ++
  17328. ++static int ath10k_pci_claim(struct ath10k *ar)
  17329. ++{
  17330. ++ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  17331. ++ struct pci_dev *pdev = ar_pci->pdev;
  17332. ++ u32 lcr_val;
  17333. ++ int ret;
  17334. ++
  17335. ++ pci_set_drvdata(pdev, ar);
  17336. ++
  17337. ++ ret = pci_enable_device(pdev);
  17338. ++ if (ret) {
  17339. ++ ath10k_err(ar, "failed to enable pci device: %d\n", ret);
  17340. ++ return ret;
  17341. ++ }
  17342. ++
  17343. ++ ret = pci_request_region(pdev, BAR_NUM, "ath");
  17344. ++ if (ret) {
  17345. ++ ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
  17346. ++ ret);
  17347. ++ goto err_device;
  17348. ++ }
  17349. ++
  17350. ++ /* Target expects 32 bit DMA. Enforce it. */
  17351. ++ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
  17352. ++ if (ret) {
  17353. ++ ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
  17354. ++ goto err_region;
  17355. ++ }
  17356. ++
  17357. ++ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
  17358. ++ if (ret) {
  17359. ++ ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
  17360. ++ ret);
  17361. ++ goto err_region;
  17362. ++ }
  17363. ++
  17364. ++ pci_set_master(pdev);
  17365. ++
  17366. ++ /* Workaround: Disable ASPM */
  17367. ++ pci_read_config_dword(pdev, 0x80, &lcr_val);
  17368. ++ pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
  17369. +
  17370. +- ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset complete\n");
  17371. ++ /* Arrange for access to Target SoC registers. */
  17372. ++ ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
  17373. ++ if (!ar_pci->mem) {
  17374. ++ ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
  17375. ++ ret = -EIO;
  17376. ++ goto err_master;
  17377. ++ }
  17378. +
  17379. ++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
  17380. + return 0;
  17381. ++
  17382. ++err_master:
  17383. ++ pci_clear_master(pdev);
  17384. ++
  17385. ++err_region:
  17386. ++ pci_release_region(pdev, BAR_NUM);
  17387. ++
  17388. ++err_device:
  17389. ++ pci_disable_device(pdev);
  17390. ++
  17391. ++ return ret;
  17392. + }
  17393. +
  17394. +-static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
  17395. ++static void ath10k_pci_release(struct ath10k *ar)
  17396. + {
  17397. ++ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  17398. ++ struct pci_dev *pdev = ar_pci->pdev;
  17399. ++
  17400. ++ pci_iounmap(pdev, ar_pci->mem);
  17401. ++ pci_release_region(pdev, BAR_NUM);
  17402. ++ pci_clear_master(pdev);
  17403. ++ pci_disable_device(pdev);
  17404. ++}
  17405. ++
  17406. ++static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
  17407. ++{
  17408. ++ const struct ath10k_pci_supp_chip *supp_chip;
  17409. + int i;
  17410. ++ u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);
  17411. +
  17412. +- for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
  17413. +- if (!test_bit(i, ar_pci->features))
  17414. +- continue;
  17415. ++ for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {
  17416. ++ supp_chip = &ath10k_pci_supp_chips[i];
  17417. +
  17418. +- switch (i) {
  17419. +- case ATH10K_PCI_FEATURE_MSI_X:
  17420. +- ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
  17421. +- break;
  17422. +- case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
  17423. +- ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
  17424. +- break;
  17425. +- }
  17426. ++ if (supp_chip->dev_id == dev_id &&
  17427. ++ supp_chip->rev_id == rev_id)
  17428. ++ return true;
  17429. + }
  17430. ++
  17431. ++ return false;
  17432. + }
  17433. +
  17434. + static int ath10k_pci_probe(struct pci_dev *pdev,
  17435. + const struct pci_device_id *pci_dev)
  17436. + {
  17437. +- void __iomem *mem;
  17438. + int ret = 0;
  17439. + struct ath10k *ar;
  17440. + struct ath10k_pci *ar_pci;
  17441. +- u32 lcr_val, chip_id;
  17442. +-
  17443. +- ath10k_dbg(ATH10K_DBG_PCI, "pci probe\n");
  17444. +-
  17445. +- ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
  17446. +- if (ar_pci == NULL)
  17447. +- return -ENOMEM;
  17448. +-
  17449. +- ar_pci->pdev = pdev;
  17450. +- ar_pci->dev = &pdev->dev;
  17451. ++ enum ath10k_hw_rev hw_rev;
  17452. ++ u32 chip_id;
  17453. +
  17454. + switch (pci_dev->device) {
  17455. + case QCA988X_2_0_DEVICE_ID:
  17456. +- set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
  17457. ++ hw_rev = ATH10K_HW_QCA988X;
  17458. ++ break;
  17459. ++ case QCA6174_2_1_DEVICE_ID:
  17460. ++ hw_rev = ATH10K_HW_QCA6174;
  17461. + break;
  17462. + default:
  17463. +- ret = -ENODEV;
  17464. +- ath10k_err("Unknown device ID: %d\n", pci_dev->device);
  17465. +- goto err_ar_pci;
  17466. ++ WARN_ON(1);
  17467. ++ return -ENOTSUPP;
  17468. + }
  17469. +
  17470. +- if (ath10k_pci_target_ps)
  17471. +- set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
  17472. +-
  17473. +- ath10k_pci_dump_features(ar_pci);
  17474. +-
  17475. +- ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
  17476. ++ ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
  17477. ++ hw_rev, &ath10k_pci_hif_ops);
  17478. + if (!ar) {
  17479. +- ath10k_err("failed to create driver core\n");
  17480. +- ret = -EINVAL;
  17481. +- goto err_ar_pci;
  17482. ++ dev_err(&pdev->dev, "failed to allocate core\n");
  17483. ++ return -ENOMEM;
  17484. + }
  17485. +
  17486. ++ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci probe\n");
  17487. ++
  17488. ++ ar_pci = ath10k_pci_priv(ar);
  17489. ++ ar_pci->pdev = pdev;
  17490. ++ ar_pci->dev = &pdev->dev;
  17491. + ar_pci->ar = ar;
  17492. +- atomic_set(&ar_pci->keep_awake_count, 0);
  17493. +
  17494. +- pci_set_drvdata(pdev, ar);
  17495. ++ spin_lock_init(&ar_pci->ce_lock);
  17496. ++ setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
  17497. ++ (unsigned long)ar);
  17498. +
  17499. +- /*
  17500. +- * Without any knowledge of the Host, the Target may have been reset or
  17501. +- * power cycled and its Config Space may no longer reflect the PCI
  17502. +- * address space that was assigned earlier by the PCI infrastructure.
  17503. +- * Refresh it now.
  17504. +- */
  17505. +- ret = pci_assign_resource(pdev, BAR_NUM);
  17506. ++ ret = ath10k_pci_claim(ar);
  17507. + if (ret) {
  17508. +- ath10k_err("failed to assign PCI space: %d\n", ret);
  17509. +- goto err_ar;
  17510. ++ ath10k_err(ar, "failed to claim device: %d\n", ret);
  17511. ++ goto err_core_destroy;
  17512. + }
  17513. +
  17514. +- ret = pci_enable_device(pdev);
  17515. ++ ret = ath10k_pci_wake(ar);
  17516. + if (ret) {
  17517. +- ath10k_err("failed to enable PCI device: %d\n", ret);
  17518. +- goto err_ar;
  17519. ++ ath10k_err(ar, "failed to wake up: %d\n", ret);
  17520. ++ goto err_release;
  17521. + }
  17522. +
  17523. +- /* Request MMIO resources */
  17524. +- ret = pci_request_region(pdev, BAR_NUM, "ath");
  17525. ++ ret = ath10k_pci_alloc_pipes(ar);
  17526. + if (ret) {
  17527. +- ath10k_err("failed to request MMIO region: %d\n", ret);
  17528. +- goto err_device;
  17529. ++ ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
  17530. ++ ret);
  17531. ++ goto err_sleep;
  17532. + }
  17533. +
  17534. +- /*
  17535. +- * Target structures have a limit of 32 bit DMA pointers.
  17536. +- * DMA pointers can be wider than 32 bits by default on some systems.
  17537. +- */
  17538. +- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
  17539. +- if (ret) {
  17540. +- ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret);
  17541. +- goto err_region;
  17542. +- }
  17543. ++ ath10k_pci_ce_deinit(ar);
  17544. ++ ath10k_pci_irq_disable(ar);
  17545. +
  17546. +- ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
  17547. ++ ret = ath10k_pci_init_irq(ar);
  17548. + if (ret) {
  17549. +- ath10k_err("failed to set consistent DMA mask to 32-bit\n");
  17550. +- goto err_region;
  17551. ++ ath10k_err(ar, "failed to init irqs: %d\n", ret);
  17552. ++ goto err_free_pipes;
  17553. + }
  17554. +
  17555. +- /* Set bus master bit in PCI_COMMAND to enable DMA */
  17556. +- pci_set_master(pdev);
  17557. +-
  17558. +- /*
  17559. +- * Temporary FIX: disable ASPM
  17560. +- * Will be removed after the OTP is programmed
  17561. +- */
  17562. +- pci_read_config_dword(pdev, 0x80, &lcr_val);
  17563. +- pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
  17564. ++ ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n",
  17565. ++ ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs,
  17566. ++ ath10k_pci_irq_mode, ath10k_pci_reset_mode);
  17567. +
  17568. +- /* Arrange for access to Target SoC registers. */
  17569. +- mem = pci_iomap(pdev, BAR_NUM, 0);
  17570. +- if (!mem) {
  17571. +- ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM);
  17572. +- ret = -EIO;
  17573. +- goto err_master;
  17574. ++ ret = ath10k_pci_request_irq(ar);
  17575. ++ if (ret) {
  17576. ++ ath10k_warn(ar, "failed to request irqs: %d\n", ret);
  17577. ++ goto err_deinit_irq;
  17578. + }
  17579. +
  17580. +- ar_pci->mem = mem;
  17581. +-
  17582. +- spin_lock_init(&ar_pci->ce_lock);
  17583. +-
  17584. +- ret = ath10k_do_pci_wake(ar);
  17585. ++ ret = ath10k_pci_chip_reset(ar);
  17586. + if (ret) {
  17587. +- ath10k_err("Failed to get chip id: %d\n", ret);
  17588. +- goto err_iomap;
  17589. ++ ath10k_err(ar, "failed to reset chip: %d\n", ret);
  17590. ++ goto err_free_irq;
  17591. + }
  17592. +
  17593. + chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
  17594. ++ if (chip_id == 0xffffffff) {
  17595. ++ ath10k_err(ar, "failed to get chip id\n");
  17596. ++ goto err_free_irq;
  17597. ++ }
  17598. +
  17599. +- ath10k_do_pci_sleep(ar);
  17600. +-
  17601. +- ret = ath10k_pci_alloc_ce(ar);
  17602. +- if (ret) {
  17603. +- ath10k_err("failed to allocate copy engine pipes: %d\n", ret);
  17604. +- goto err_iomap;
  17605. ++ if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
  17606. ++ ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
  17607. ++ pdev->device, chip_id);
  17608. ++ goto err_sleep;
  17609. + }
  17610. +
  17611. +- ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
  17612. ++ ath10k_pci_sleep(ar);
  17613. +
  17614. + ret = ath10k_core_register(ar, chip_id);
  17615. + if (ret) {
  17616. +- ath10k_err("failed to register driver core: %d\n", ret);
  17617. +- goto err_free_ce;
  17618. ++ ath10k_err(ar, "failed to register driver core: %d\n", ret);
  17619. ++ goto err_free_irq;
  17620. + }
  17621. +
  17622. + return 0;
  17623. +
  17624. +-err_free_ce:
  17625. +- ath10k_pci_free_ce(ar);
  17626. +-err_iomap:
  17627. +- pci_iounmap(pdev, mem);
  17628. +-err_master:
  17629. +- pci_clear_master(pdev);
  17630. +-err_region:
  17631. +- pci_release_region(pdev, BAR_NUM);
  17632. +-err_device:
  17633. +- pci_disable_device(pdev);
  17634. +-err_ar:
  17635. ++err_free_irq:
  17636. ++ ath10k_pci_free_irq(ar);
  17637. ++ ath10k_pci_kill_tasklet(ar);
  17638. ++
  17639. ++err_deinit_irq:
  17640. ++ ath10k_pci_deinit_irq(ar);
  17641. ++
  17642. ++err_free_pipes:
  17643. ++ ath10k_pci_free_pipes(ar);
  17644. ++
  17645. ++err_sleep:
  17646. ++ ath10k_pci_sleep(ar);
  17647. ++
  17648. ++err_release:
  17649. ++ ath10k_pci_release(ar);
  17650. ++
  17651. ++err_core_destroy:
  17652. + ath10k_core_destroy(ar);
  17653. +-err_ar_pci:
  17654. +- /* call HIF PCI free here */
  17655. +- kfree(ar_pci);
  17656. +
  17657. + return ret;
  17658. + }
  17659. +@@ -2715,7 +2718,7 @@ static void ath10k_pci_remove(struct pci
  17660. + struct ath10k *ar = pci_get_drvdata(pdev);
  17661. + struct ath10k_pci *ar_pci;
  17662. +
  17663. +- ath10k_dbg(ATH10K_DBG_PCI, "pci remove\n");
  17664. ++ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");
  17665. +
  17666. + if (!ar)
  17667. + return;
  17668. +@@ -2725,18 +2728,14 @@ static void ath10k_pci_remove(struct pci
  17669. + if (!ar_pci)
  17670. + return;
  17671. +
  17672. +- tasklet_kill(&ar_pci->msi_fw_err);
  17673. +-
  17674. + ath10k_core_unregister(ar);
  17675. +- ath10k_pci_free_ce(ar);
  17676. +-
  17677. +- pci_iounmap(pdev, ar_pci->mem);
  17678. +- pci_release_region(pdev, BAR_NUM);
  17679. +- pci_clear_master(pdev);
  17680. +- pci_disable_device(pdev);
  17681. +-
  17682. ++ ath10k_pci_free_irq(ar);
  17683. ++ ath10k_pci_kill_tasklet(ar);
  17684. ++ ath10k_pci_deinit_irq(ar);
  17685. ++ ath10k_pci_ce_deinit(ar);
  17686. ++ ath10k_pci_free_pipes(ar);
  17687. ++ ath10k_pci_release(ar);
  17688. + ath10k_core_destroy(ar);
  17689. +- kfree(ar_pci);
  17690. + }
  17691. +
  17692. + MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
  17693. +@@ -2754,7 +2753,8 @@ static int __init ath10k_pci_init(void)
  17694. +
  17695. + ret = pci_register_driver(&ath10k_pci_driver);
  17696. + if (ret)
  17697. +- ath10k_err("failed to register PCI driver: %d\n", ret);
  17698. ++ printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
  17699. ++ ret);
  17700. +
  17701. + return ret;
  17702. + }
  17703. +@@ -2770,5 +2770,7 @@ module_exit(ath10k_pci_exit);
  17704. + MODULE_AUTHOR("Qualcomm Atheros");
  17705. + MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
  17706. + MODULE_LICENSE("Dual BSD/GPL");
  17707. +-MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_2_FILE);
  17708. ++MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
  17709. ++MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
  17710. ++MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
  17711. + MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
  17712. +--- a/drivers/net/wireless/ath/ath10k/pci.h
  17713. ++++ b/drivers/net/wireless/ath/ath10k/pci.h
  17714. +@@ -23,9 +23,6 @@
  17715. + #include "hw.h"
  17716. + #include "ce.h"
  17717. +
  17718. +-/* FW dump area */
  17719. +-#define REG_DUMP_COUNT_QCA988X 60
  17720. +-
  17721. + /*
  17722. + * maximum number of bytes that can be handled atomically by DiagRead/DiagWrite
  17723. + */
  17724. +@@ -38,7 +35,8 @@
  17725. + #define DIAG_TRANSFER_LIMIT 2048
  17726. +
  17727. + struct bmi_xfer {
  17728. +- struct completion done;
  17729. ++ bool tx_done;
  17730. ++ bool rx_done;
  17731. + bool wait_for_resp;
  17732. + u32 resp_len;
  17733. + };
  17734. +@@ -102,12 +100,12 @@ struct pcie_state {
  17735. + * NOTE: Structure is shared between Host software and Target firmware!
  17736. + */
  17737. + struct ce_pipe_config {
  17738. +- u32 pipenum;
  17739. +- u32 pipedir;
  17740. +- u32 nentries;
  17741. +- u32 nbytes_max;
  17742. +- u32 flags;
  17743. +- u32 reserved;
  17744. ++ __le32 pipenum;
  17745. ++ __le32 pipedir;
  17746. ++ __le32 nentries;
  17747. ++ __le32 nbytes_max;
  17748. ++ __le32 flags;
  17749. ++ __le32 reserved;
  17750. + };
  17751. +
  17752. + /*
  17753. +@@ -129,17 +127,9 @@ struct ce_pipe_config {
  17754. +
  17755. + /* Establish a mapping between a service/direction and a pipe. */
  17756. + struct service_to_pipe {
  17757. +- u32 service_id;
  17758. +- u32 pipedir;
  17759. +- u32 pipenum;
  17760. +-};
  17761. +-
  17762. +-enum ath10k_pci_features {
  17763. +- ATH10K_PCI_FEATURE_MSI_X = 0,
  17764. +- ATH10K_PCI_FEATURE_SOC_POWER_SAVE = 1,
  17765. +-
  17766. +- /* keep last */
  17767. +- ATH10K_PCI_FEATURE_COUNT
  17768. ++ __le32 service_id;
  17769. ++ __le32 pipedir;
  17770. ++ __le32 pipenum;
  17771. + };
  17772. +
  17773. + /* Per-pipe state. */
  17774. +@@ -162,14 +152,17 @@ struct ath10k_pci_pipe {
  17775. + struct tasklet_struct intr;
  17776. + };
  17777. +
  17778. ++struct ath10k_pci_supp_chip {
  17779. ++ u32 dev_id;
  17780. ++ u32 rev_id;
  17781. ++};
  17782. ++
  17783. + struct ath10k_pci {
  17784. + struct pci_dev *pdev;
  17785. + struct device *dev;
  17786. + struct ath10k *ar;
  17787. + void __iomem *mem;
  17788. +
  17789. +- DECLARE_BITMAP(features, ATH10K_PCI_FEATURE_COUNT);
  17790. +-
  17791. + /*
  17792. + * Number of MSI interrupts granted, 0 --> using legacy PCI line
  17793. + * interrupts.
  17794. +@@ -178,12 +171,6 @@ struct ath10k_pci {
  17795. +
  17796. + struct tasklet_struct intr_tq;
  17797. + struct tasklet_struct msi_fw_err;
  17798. +- struct tasklet_struct early_irq_tasklet;
  17799. +-
  17800. +- int started;
  17801. +-
  17802. +- atomic_t keep_awake_count;
  17803. +- bool verified_awake;
  17804. +
  17805. + struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
  17806. +
  17807. +@@ -197,29 +184,17 @@ struct ath10k_pci {
  17808. +
  17809. + /* Map CE id to ce_state */
  17810. + struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
  17811. ++ struct timer_list rx_post_retry;
  17812. + };
  17813. +
  17814. + static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
  17815. + {
  17816. +- return ar->hif.priv;
  17817. +-}
  17818. +-
  17819. +-static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
  17820. +-{
  17821. +- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  17822. +-
  17823. +- return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
  17824. +-}
  17825. +-
  17826. +-static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
  17827. +-{
  17828. +- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  17829. +-
  17830. +- iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
  17831. ++ return (struct ath10k_pci *)ar->drv_priv;
  17832. + }
  17833. +
  17834. ++#define ATH10K_PCI_RX_POST_RETRY_MS 50
  17835. + #define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
  17836. +-#define PCIE_WAKE_TIMEOUT 5000 /* 5ms */
  17837. ++#define PCIE_WAKE_TIMEOUT 10000 /* 10ms */
  17838. +
  17839. + #define BAR_NUM 0
  17840. +
  17841. +@@ -241,35 +216,17 @@ static inline void ath10k_pci_reg_write3
  17842. + /* Wait up to this many Ms for a Diagnostic Access CE operation to complete */
  17843. + #define DIAG_ACCESS_CE_TIMEOUT_MS 10
  17844. +
  17845. +-/*
  17846. +- * This API allows the Host to access Target registers directly
  17847. +- * and relatively efficiently over PCIe.
  17848. +- * This allows the Host to avoid extra overhead associated with
  17849. +- * sending a message to firmware and waiting for a response message
  17850. +- * from firmware, as is done on other interconnects.
  17851. +- *
  17852. +- * Yet there is some complexity with direct accesses because the
  17853. +- * Target's power state is not known a priori. The Host must issue
  17854. +- * special PCIe reads/writes in order to explicitly wake the Target
  17855. +- * and to verify that it is awake and will remain awake.
  17856. +- *
  17857. +- * Usage:
  17858. ++/* Target exposes its registers for direct access. However before host can
  17859. ++ * access them it needs to make sure the target is awake (ath10k_pci_wake,
  17860. ++ * ath10k_pci_wake_wait, ath10k_pci_is_awake). Once target is awake it won't go
  17861. ++ * to sleep unless host tells it to (ath10k_pci_sleep).
  17862. + *
  17863. +- * Use ath10k_pci_read32 and ath10k_pci_write32 to access Target space.
  17864. +- * These calls must be bracketed by ath10k_pci_wake and
  17865. +- * ath10k_pci_sleep. A single BEGIN/END pair is adequate for
  17866. +- * multiple READ/WRITE operations.
  17867. ++ * If host tries to access target registers without waking it up it can
  17868. ++ * scribble over host memory.
  17869. + *
  17870. +- * Use ath10k_pci_wake to put the Target in a state in
  17871. +- * which it is legal for the Host to directly access it. This
  17872. +- * may involve waking the Target from a low power state, which
  17873. +- * may take up to 2Ms!
  17874. +- *
  17875. +- * Use ath10k_pci_sleep to tell the Target that as far as
  17876. +- * this code path is concerned, it no longer needs to remain
  17877. +- * directly accessible. BEGIN/END is under a reference counter;
  17878. +- * multiple code paths may issue BEGIN/END on a single targid.
  17879. ++ * If target is asleep waking it up may take up to even 2ms.
  17880. + */
  17881. ++
  17882. + static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
  17883. + u32 value)
  17884. + {
  17885. +@@ -295,25 +252,18 @@ static inline void ath10k_pci_soc_write3
  17886. + ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
  17887. + }
  17888. +
  17889. +-int ath10k_do_pci_wake(struct ath10k *ar);
  17890. +-void ath10k_do_pci_sleep(struct ath10k *ar);
  17891. +-
  17892. +-static inline int ath10k_pci_wake(struct ath10k *ar)
  17893. ++static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
  17894. + {
  17895. + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  17896. +
  17897. +- if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
  17898. +- return ath10k_do_pci_wake(ar);
  17899. +-
  17900. +- return 0;
  17901. ++ return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
  17902. + }
  17903. +
  17904. +-static inline void ath10k_pci_sleep(struct ath10k *ar)
  17905. ++static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
  17906. + {
  17907. + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
  17908. +
  17909. +- if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
  17910. +- ath10k_do_pci_sleep(ar);
  17911. ++ iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
  17912. + }
  17913. +
  17914. + #endif /* _PCI_H_ */
  17915. +--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
  17916. ++++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
  17917. +@@ -839,7 +839,6 @@ struct rx_ppdu_start {
  17918. + * Reserved: HW should fill with 0, FW should ignore.
  17919. + */
  17920. +
  17921. +-
  17922. + #define RX_PPDU_END_FLAGS_PHY_ERR (1 << 0)
  17923. + #define RX_PPDU_END_FLAGS_RX_LOCATION (1 << 1)
  17924. + #define RX_PPDU_END_FLAGS_TXBF_H_INFO (1 << 2)
  17925. +@@ -851,7 +850,7 @@ struct rx_ppdu_start {
  17926. +
  17927. + #define RX_PPDU_END_INFO1_PPDU_DONE (1 << 15)
  17928. +
  17929. +-struct rx_ppdu_end {
  17930. ++struct rx_ppdu_end_common {
  17931. + __le32 evm_p0;
  17932. + __le32 evm_p1;
  17933. + __le32 evm_p2;
  17934. +@@ -874,10 +873,33 @@ struct rx_ppdu_end {
  17935. + u8 phy_err_code;
  17936. + __le16 flags; /* %RX_PPDU_END_FLAGS_ */
  17937. + __le32 info0; /* %RX_PPDU_END_INFO0_ */
  17938. ++} __packed;
  17939. ++
  17940. ++struct rx_ppdu_end_qca988x {
  17941. ++ __le16 bb_length;
  17942. ++ __le16 info1; /* %RX_PPDU_END_INFO1_ */
  17943. ++} __packed;
  17944. ++
  17945. ++#define RX_PPDU_END_RTT_CORRELATION_VALUE_MASK 0x00ffffff
  17946. ++#define RX_PPDU_END_RTT_CORRELATION_VALUE_LSB 0
  17947. ++#define RX_PPDU_END_RTT_UNUSED_MASK 0x7f000000
  17948. ++#define RX_PPDU_END_RTT_UNUSED_LSB 24
  17949. ++#define RX_PPDU_END_RTT_NORMAL_MODE BIT(31)
  17950. ++
  17951. ++struct rx_ppdu_end_qca6174 {
  17952. ++ __le32 rtt; /* %RX_PPDU_END_RTT_ */
  17953. + __le16 bb_length;
  17954. + __le16 info1; /* %RX_PPDU_END_INFO1_ */
  17955. + } __packed;
  17956. +
  17957. ++struct rx_ppdu_end {
  17958. ++ struct rx_ppdu_end_common common;
  17959. ++ union {
  17960. ++ struct rx_ppdu_end_qca988x qca988x;
  17961. ++ struct rx_ppdu_end_qca6174 qca6174;
  17962. ++ } __packed;
  17963. ++} __packed;
  17964. ++
  17965. + /*
  17966. + * evm_p0
  17967. + * EVM for pilot 0. Contain EVM for streams: 0, 1, 2 and 3.
  17968. +--- a/drivers/net/wireless/ath/ath10k/targaddrs.h
  17969. ++++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
  17970. +@@ -18,6 +18,8 @@
  17971. + #ifndef __TARGADDRS_H__
  17972. + #define __TARGADDRS_H__
  17973. +
  17974. ++#include "hw.h"
  17975. ++
  17976. + /*
  17977. + * xxx_HOST_INTEREST_ADDRESS is the address in Target RAM of the
  17978. + * host_interest structure. It must match the address of the _host_interest
  17979. +@@ -284,7 +286,6 @@ Fw Mode/SubMode Mask
  17980. + #define HI_OPTION_ALL_FW_SUBMODE_MASK 0xFF00
  17981. + #define HI_OPTION_ALL_FW_SUBMODE_SHIFT 0x8
  17982. +
  17983. +-
  17984. + /* hi_option_flag2 options */
  17985. + #define HI_OPTION_OFFLOAD_AMSDU 0x01
  17986. + #define HI_OPTION_DFS_SUPPORT 0x02 /* Enable DFS support */
  17987. +@@ -446,4 +447,7 @@ Fw Mode/SubMode Mask
  17988. + #define QCA988X_BOARD_DATA_SZ 7168
  17989. + #define QCA988X_BOARD_EXT_DATA_SZ 0
  17990. +
  17991. ++#define QCA6174_BOARD_DATA_SZ 8192
  17992. ++#define QCA6174_BOARD_EXT_DATA_SZ 0
  17993. ++
  17994. + #endif /* __TARGADDRS_H__ */
  17995. +--- a/drivers/net/wireless/ath/ath10k/trace.h
  17996. ++++ b/drivers/net/wireless/ath/ath10k/trace.h
  17997. +@@ -18,6 +18,16 @@
  17998. + #if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
  17999. +
  18000. + #include <linux/tracepoint.h>
  18001. ++#include "core.h"
  18002. ++
  18003. ++#if !defined(_TRACE_H_)
  18004. ++static inline u32 ath10k_frm_hdr_len(const void *buf)
  18005. ++{
  18006. ++ const struct ieee80211_hdr *hdr = buf;
  18007. ++
  18008. ++ return ieee80211_hdrlen(hdr->frame_control);
  18009. ++}
  18010. ++#endif
  18011. +
  18012. + #define _TRACE_H_
  18013. +
  18014. +@@ -39,59 +49,79 @@ static inline void trace_ ## name(proto)
  18015. + #define ATH10K_MSG_MAX 200
  18016. +
  18017. + DECLARE_EVENT_CLASS(ath10k_log_event,
  18018. +- TP_PROTO(struct va_format *vaf),
  18019. +- TP_ARGS(vaf),
  18020. ++ TP_PROTO(struct ath10k *ar, struct va_format *vaf),
  18021. ++ TP_ARGS(ar, vaf),
  18022. + TP_STRUCT__entry(
  18023. ++ __string(device, dev_name(ar->dev))
  18024. ++ __string(driver, dev_driver_string(ar->dev))
  18025. + __dynamic_array(char, msg, ATH10K_MSG_MAX)
  18026. + ),
  18027. + TP_fast_assign(
  18028. ++ __assign_str(device, dev_name(ar->dev));
  18029. ++ __assign_str(driver, dev_driver_string(ar->dev));
  18030. + WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
  18031. + ATH10K_MSG_MAX,
  18032. + vaf->fmt,
  18033. + *vaf->va) >= ATH10K_MSG_MAX);
  18034. + ),
  18035. +- TP_printk("%s", __get_str(msg))
  18036. ++ TP_printk(
  18037. ++ "%s %s %s",
  18038. ++ __get_str(driver),
  18039. ++ __get_str(device),
  18040. ++ __get_str(msg)
  18041. ++ )
  18042. + );
  18043. +
  18044. + DEFINE_EVENT(ath10k_log_event, ath10k_log_err,
  18045. +- TP_PROTO(struct va_format *vaf),
  18046. +- TP_ARGS(vaf)
  18047. ++ TP_PROTO(struct ath10k *ar, struct va_format *vaf),
  18048. ++ TP_ARGS(ar, vaf)
  18049. + );
  18050. +
  18051. + DEFINE_EVENT(ath10k_log_event, ath10k_log_warn,
  18052. +- TP_PROTO(struct va_format *vaf),
  18053. +- TP_ARGS(vaf)
  18054. ++ TP_PROTO(struct ath10k *ar, struct va_format *vaf),
  18055. ++ TP_ARGS(ar, vaf)
  18056. + );
  18057. +
  18058. + DEFINE_EVENT(ath10k_log_event, ath10k_log_info,
  18059. +- TP_PROTO(struct va_format *vaf),
  18060. +- TP_ARGS(vaf)
  18061. ++ TP_PROTO(struct ath10k *ar, struct va_format *vaf),
  18062. ++ TP_ARGS(ar, vaf)
  18063. + );
  18064. +
  18065. + TRACE_EVENT(ath10k_log_dbg,
  18066. +- TP_PROTO(unsigned int level, struct va_format *vaf),
  18067. +- TP_ARGS(level, vaf),
  18068. ++ TP_PROTO(struct ath10k *ar, unsigned int level, struct va_format *vaf),
  18069. ++ TP_ARGS(ar, level, vaf),
  18070. + TP_STRUCT__entry(
  18071. ++ __string(device, dev_name(ar->dev))
  18072. ++ __string(driver, dev_driver_string(ar->dev))
  18073. + __field(unsigned int, level)
  18074. + __dynamic_array(char, msg, ATH10K_MSG_MAX)
  18075. + ),
  18076. + TP_fast_assign(
  18077. ++ __assign_str(device, dev_name(ar->dev));
  18078. ++ __assign_str(driver, dev_driver_string(ar->dev));
  18079. + __entry->level = level;
  18080. + WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
  18081. + ATH10K_MSG_MAX,
  18082. + vaf->fmt,
  18083. + *vaf->va) >= ATH10K_MSG_MAX);
  18084. + ),
  18085. +- TP_printk("%s", __get_str(msg))
  18086. ++ TP_printk(
  18087. ++ "%s %s %s",
  18088. ++ __get_str(driver),
  18089. ++ __get_str(device),
  18090. ++ __get_str(msg)
  18091. ++ )
  18092. + );
  18093. +
  18094. + TRACE_EVENT(ath10k_log_dbg_dump,
  18095. +- TP_PROTO(const char *msg, const char *prefix,
  18096. ++ TP_PROTO(struct ath10k *ar, const char *msg, const char *prefix,
  18097. + const void *buf, size_t buf_len),
  18098. +
  18099. +- TP_ARGS(msg, prefix, buf, buf_len),
  18100. ++ TP_ARGS(ar, msg, prefix, buf, buf_len),
  18101. +
  18102. + TP_STRUCT__entry(
  18103. ++ __string(device, dev_name(ar->dev))
  18104. ++ __string(driver, dev_driver_string(ar->dev))
  18105. + __string(msg, msg)
  18106. + __string(prefix, prefix)
  18107. + __field(size_t, buf_len)
  18108. +@@ -99,6 +129,8 @@ TRACE_EVENT(ath10k_log_dbg_dump,
  18109. + ),
  18110. +
  18111. + TP_fast_assign(
  18112. ++ __assign_str(device, dev_name(ar->dev));
  18113. ++ __assign_str(driver, dev_driver_string(ar->dev));
  18114. + __assign_str(msg, msg);
  18115. + __assign_str(prefix, prefix);
  18116. + __entry->buf_len = buf_len;
  18117. +@@ -106,16 +138,23 @@ TRACE_EVENT(ath10k_log_dbg_dump,
  18118. + ),
  18119. +
  18120. + TP_printk(
  18121. +- "%s/%s\n", __get_str(prefix), __get_str(msg)
  18122. ++ "%s %s %s/%s\n",
  18123. ++ __get_str(driver),
  18124. ++ __get_str(device),
  18125. ++ __get_str(prefix),
  18126. ++ __get_str(msg)
  18127. + )
  18128. + );
  18129. +
  18130. + TRACE_EVENT(ath10k_wmi_cmd,
  18131. +- TP_PROTO(int id, void *buf, size_t buf_len, int ret),
  18132. ++ TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len,
  18133. ++ int ret),
  18134. +
  18135. +- TP_ARGS(id, buf, buf_len, ret),
  18136. ++ TP_ARGS(ar, id, buf, buf_len, ret),
  18137. +
  18138. + TP_STRUCT__entry(
  18139. ++ __string(device, dev_name(ar->dev))
  18140. ++ __string(driver, dev_driver_string(ar->dev))
  18141. + __field(unsigned int, id)
  18142. + __field(size_t, buf_len)
  18143. + __dynamic_array(u8, buf, buf_len)
  18144. +@@ -123,6 +162,8 @@ TRACE_EVENT(ath10k_wmi_cmd,
  18145. + ),
  18146. +
  18147. + TP_fast_assign(
  18148. ++ __assign_str(device, dev_name(ar->dev));
  18149. ++ __assign_str(driver, dev_driver_string(ar->dev));
  18150. + __entry->id = id;
  18151. + __entry->buf_len = buf_len;
  18152. + __entry->ret = ret;
  18153. +@@ -130,7 +171,9 @@ TRACE_EVENT(ath10k_wmi_cmd,
  18154. + ),
  18155. +
  18156. + TP_printk(
  18157. +- "id %d len %zu ret %d",
  18158. ++ "%s %s id %d len %zu ret %d",
  18159. ++ __get_str(driver),
  18160. ++ __get_str(device),
  18161. + __entry->id,
  18162. + __entry->buf_len,
  18163. + __entry->ret
  18164. +@@ -138,71 +181,346 @@ TRACE_EVENT(ath10k_wmi_cmd,
  18165. + );
  18166. +
  18167. + TRACE_EVENT(ath10k_wmi_event,
  18168. +- TP_PROTO(int id, void *buf, size_t buf_len),
  18169. ++ TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len),
  18170. +
  18171. +- TP_ARGS(id, buf, buf_len),
  18172. ++ TP_ARGS(ar, id, buf, buf_len),
  18173. +
  18174. + TP_STRUCT__entry(
  18175. ++ __string(device, dev_name(ar->dev))
  18176. ++ __string(driver, dev_driver_string(ar->dev))
  18177. + __field(unsigned int, id)
  18178. + __field(size_t, buf_len)
  18179. + __dynamic_array(u8, buf, buf_len)
  18180. + ),
  18181. +
  18182. + TP_fast_assign(
  18183. ++ __assign_str(device, dev_name(ar->dev));
  18184. ++ __assign_str(driver, dev_driver_string(ar->dev));
  18185. + __entry->id = id;
  18186. + __entry->buf_len = buf_len;
  18187. + memcpy(__get_dynamic_array(buf), buf, buf_len);
  18188. + ),
  18189. +
  18190. + TP_printk(
  18191. +- "id %d len %zu",
  18192. ++ "%s %s id %d len %zu",
  18193. ++ __get_str(driver),
  18194. ++ __get_str(device),
  18195. + __entry->id,
  18196. + __entry->buf_len
  18197. + )
  18198. + );
  18199. +
  18200. + TRACE_EVENT(ath10k_htt_stats,
  18201. +- TP_PROTO(void *buf, size_t buf_len),
  18202. ++ TP_PROTO(struct ath10k *ar, const void *buf, size_t buf_len),
  18203. +
  18204. +- TP_ARGS(buf, buf_len),
  18205. ++ TP_ARGS(ar, buf, buf_len),
  18206. +
  18207. + TP_STRUCT__entry(
  18208. ++ __string(device, dev_name(ar->dev))
  18209. ++ __string(driver, dev_driver_string(ar->dev))
  18210. + __field(size_t, buf_len)
  18211. + __dynamic_array(u8, buf, buf_len)
  18212. + ),
  18213. +
  18214. + TP_fast_assign(
  18215. ++ __assign_str(device, dev_name(ar->dev));
  18216. ++ __assign_str(driver, dev_driver_string(ar->dev));
  18217. + __entry->buf_len = buf_len;
  18218. + memcpy(__get_dynamic_array(buf), buf, buf_len);
  18219. + ),
  18220. +
  18221. + TP_printk(
  18222. +- "len %zu",
  18223. ++ "%s %s len %zu",
  18224. ++ __get_str(driver),
  18225. ++ __get_str(device),
  18226. + __entry->buf_len
  18227. + )
  18228. + );
  18229. +
  18230. + TRACE_EVENT(ath10k_wmi_dbglog,
  18231. +- TP_PROTO(void *buf, size_t buf_len),
  18232. ++ TP_PROTO(struct ath10k *ar, const void *buf, size_t buf_len),
  18233. +
  18234. +- TP_ARGS(buf, buf_len),
  18235. ++ TP_ARGS(ar, buf, buf_len),
  18236. +
  18237. + TP_STRUCT__entry(
  18238. ++ __string(device, dev_name(ar->dev))
  18239. ++ __string(driver, dev_driver_string(ar->dev))
  18240. + __field(size_t, buf_len)
  18241. + __dynamic_array(u8, buf, buf_len)
  18242. + ),
  18243. +
  18244. + TP_fast_assign(
  18245. ++ __assign_str(device, dev_name(ar->dev));
  18246. ++ __assign_str(driver, dev_driver_string(ar->dev));
  18247. + __entry->buf_len = buf_len;
  18248. + memcpy(__get_dynamic_array(buf), buf, buf_len);
  18249. + ),
  18250. +
  18251. + TP_printk(
  18252. +- "len %zu",
  18253. ++ "%s %s len %zu",
  18254. ++ __get_str(driver),
  18255. ++ __get_str(device),
  18256. + __entry->buf_len
  18257. + )
  18258. + );
  18259. +
  18260. ++TRACE_EVENT(ath10k_htt_pktlog,
  18261. ++ TP_PROTO(struct ath10k *ar, const void *buf, u16 buf_len),
  18262. ++
  18263. ++ TP_ARGS(ar, buf, buf_len),
  18264. ++
  18265. ++ TP_STRUCT__entry(
  18266. ++ __string(device, dev_name(ar->dev))
  18267. ++ __string(driver, dev_driver_string(ar->dev))
  18268. ++ __field(u16, buf_len)
  18269. ++ __dynamic_array(u8, pktlog, buf_len)
  18270. ++ ),
  18271. ++
  18272. ++ TP_fast_assign(
  18273. ++ __assign_str(device, dev_name(ar->dev));
  18274. ++ __assign_str(driver, dev_driver_string(ar->dev));
  18275. ++ __entry->buf_len = buf_len;
  18276. ++ memcpy(__get_dynamic_array(pktlog), buf, buf_len);
  18277. ++ ),
  18278. ++
  18279. ++ TP_printk(
  18280. ++ "%s %s size %hu",
  18281. ++ __get_str(driver),
  18282. ++ __get_str(device),
  18283. ++ __entry->buf_len
  18284. ++ )
  18285. ++);
  18286. ++
  18287. ++TRACE_EVENT(ath10k_htt_tx,
  18288. ++ TP_PROTO(struct ath10k *ar, u16 msdu_id, u16 msdu_len,
  18289. ++ u8 vdev_id, u8 tid),
  18290. ++
  18291. ++ TP_ARGS(ar, msdu_id, msdu_len, vdev_id, tid),
  18292. ++
  18293. ++ TP_STRUCT__entry(
  18294. ++ __string(device, dev_name(ar->dev))
  18295. ++ __string(driver, dev_driver_string(ar->dev))
  18296. ++ __field(u16, msdu_id)
  18297. ++ __field(u16, msdu_len)
  18298. ++ __field(u8, vdev_id)
  18299. ++ __field(u8, tid)
  18300. ++ ),
  18301. ++
  18302. ++ TP_fast_assign(
  18303. ++ __assign_str(device, dev_name(ar->dev));
  18304. ++ __assign_str(driver, dev_driver_string(ar->dev));
  18305. ++ __entry->msdu_id = msdu_id;
  18306. ++ __entry->msdu_len = msdu_len;
  18307. ++ __entry->vdev_id = vdev_id;
  18308. ++ __entry->tid = tid;
  18309. ++ ),
  18310. ++
  18311. ++ TP_printk(
  18312. ++ "%s %s msdu_id %d msdu_len %d vdev_id %d tid %d",
  18313. ++ __get_str(driver),
  18314. ++ __get_str(device),
  18315. ++ __entry->msdu_id,
  18316. ++ __entry->msdu_len,
  18317. ++ __entry->vdev_id,
  18318. ++ __entry->tid
  18319. ++ )
  18320. ++);
  18321. ++
  18322. ++TRACE_EVENT(ath10k_txrx_tx_unref,
  18323. ++ TP_PROTO(struct ath10k *ar, u16 msdu_id),
  18324. ++
  18325. ++ TP_ARGS(ar, msdu_id),
  18326. ++
  18327. ++ TP_STRUCT__entry(
  18328. ++ __string(device, dev_name(ar->dev))
  18329. ++ __string(driver, dev_driver_string(ar->dev))
  18330. ++ __field(u16, msdu_id)
  18331. ++ ),
  18332. ++
  18333. ++ TP_fast_assign(
  18334. ++ __assign_str(device, dev_name(ar->dev));
  18335. ++ __assign_str(driver, dev_driver_string(ar->dev));
  18336. ++ __entry->msdu_id = msdu_id;
  18337. ++ ),
  18338. ++
  18339. ++ TP_printk(
  18340. ++ "%s %s msdu_id %d",
  18341. ++ __get_str(driver),
  18342. ++ __get_str(device),
  18343. ++ __entry->msdu_id
  18344. ++ )
  18345. ++);
  18346. ++
  18347. ++DECLARE_EVENT_CLASS(ath10k_hdr_event,
  18348. ++ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
  18349. ++
  18350. ++ TP_ARGS(ar, data, len),
  18351. ++
  18352. ++ TP_STRUCT__entry(
  18353. ++ __string(device, dev_name(ar->dev))
  18354. ++ __string(driver, dev_driver_string(ar->dev))
  18355. ++ __field(size_t, len)
  18356. ++ __dynamic_array(u8, data, ath10k_frm_hdr_len(data))
  18357. ++ ),
  18358. ++
  18359. ++ TP_fast_assign(
  18360. ++ __assign_str(device, dev_name(ar->dev));
  18361. ++ __assign_str(driver, dev_driver_string(ar->dev));
  18362. ++ __entry->len = ath10k_frm_hdr_len(data);
  18363. ++ memcpy(__get_dynamic_array(data), data, __entry->len);
  18364. ++ ),
  18365. ++
  18366. ++ TP_printk(
  18367. ++ "%s %s len %zu\n",
  18368. ++ __get_str(driver),
  18369. ++ __get_str(device),
  18370. ++ __entry->len
  18371. ++ )
  18372. ++);
  18373. ++
  18374. ++DECLARE_EVENT_CLASS(ath10k_payload_event,
  18375. ++ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
  18376. ++
  18377. ++ TP_ARGS(ar, data, len),
  18378. ++
  18379. ++ TP_STRUCT__entry(
  18380. ++ __string(device, dev_name(ar->dev))
  18381. ++ __string(driver, dev_driver_string(ar->dev))
  18382. ++ __field(size_t, len)
  18383. ++ __dynamic_array(u8, payload, (len - ath10k_frm_hdr_len(data)))
  18384. ++ ),
  18385. ++
  18386. ++ TP_fast_assign(
  18387. ++ __assign_str(device, dev_name(ar->dev));
  18388. ++ __assign_str(driver, dev_driver_string(ar->dev));
  18389. ++ __entry->len = len - ath10k_frm_hdr_len(data);
  18390. ++ memcpy(__get_dynamic_array(payload),
  18391. ++ data + ath10k_frm_hdr_len(data), __entry->len);
  18392. ++ ),
  18393. ++
  18394. ++ TP_printk(
  18395. ++ "%s %s len %zu\n",
  18396. ++ __get_str(driver),
  18397. ++ __get_str(device),
  18398. ++ __entry->len
  18399. ++ )
  18400. ++);
  18401. ++
  18402. ++DEFINE_EVENT(ath10k_hdr_event, ath10k_tx_hdr,
  18403. ++ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
  18404. ++ TP_ARGS(ar, data, len)
  18405. ++);
  18406. ++
  18407. ++DEFINE_EVENT(ath10k_payload_event, ath10k_tx_payload,
  18408. ++ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
  18409. ++ TP_ARGS(ar, data, len)
  18410. ++);
  18411. ++
  18412. ++DEFINE_EVENT(ath10k_hdr_event, ath10k_rx_hdr,
  18413. ++ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
  18414. ++ TP_ARGS(ar, data, len)
  18415. ++);
  18416. ++
  18417. ++DEFINE_EVENT(ath10k_payload_event, ath10k_rx_payload,
  18418. ++ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
  18419. ++ TP_ARGS(ar, data, len)
  18420. ++);
  18421. ++
  18422. ++TRACE_EVENT(ath10k_htt_rx_desc,
  18423. ++ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
  18424. ++
  18425. ++ TP_ARGS(ar, data, len),
  18426. ++
  18427. ++ TP_STRUCT__entry(
  18428. ++ __string(device, dev_name(ar->dev))
  18429. ++ __string(driver, dev_driver_string(ar->dev))
  18430. ++ __field(u16, len)
  18431. ++ __dynamic_array(u8, rxdesc, len)
  18432. ++ ),
  18433. ++
  18434. ++ TP_fast_assign(
  18435. ++ __assign_str(device, dev_name(ar->dev));
  18436. ++ __assign_str(driver, dev_driver_string(ar->dev));
  18437. ++ __entry->len = len;
  18438. ++ memcpy(__get_dynamic_array(rxdesc), data, len);
  18439. ++ ),
  18440. ++
  18441. ++ TP_printk(
  18442. ++ "%s %s rxdesc len %d",
  18443. ++ __get_str(driver),
  18444. ++ __get_str(device),
  18445. ++ __entry->len
  18446. ++ )
  18447. ++);
  18448. ++
  18449. ++TRACE_EVENT(ath10k_wmi_diag_container,
  18450. ++ TP_PROTO(struct ath10k *ar,
  18451. ++ u8 type,
  18452. ++ u32 timestamp,
  18453. ++ u32 code,
  18454. ++ u16 len,
  18455. ++ const void *data),
  18456. ++
  18457. ++ TP_ARGS(ar, type, timestamp, code, len, data),
  18458. ++
  18459. ++ TP_STRUCT__entry(
  18460. ++ __string(device, dev_name(ar->dev))
  18461. ++ __string(driver, dev_driver_string(ar->dev))
  18462. ++ __field(u8, type)
  18463. ++ __field(u32, timestamp)
  18464. ++ __field(u32, code)
  18465. ++ __field(u16, len)
  18466. ++ __dynamic_array(u8, data, len)
  18467. ++ ),
  18468. ++
  18469. ++ TP_fast_assign(
  18470. ++ __assign_str(device, dev_name(ar->dev));
  18471. ++ __assign_str(driver, dev_driver_string(ar->dev));
  18472. ++ __entry->type = type;
  18473. ++ __entry->timestamp = timestamp;
  18474. ++ __entry->code = code;
  18475. ++ __entry->len = len;
  18476. ++ memcpy(__get_dynamic_array(data), data, len);
  18477. ++ ),
  18478. ++
  18479. ++ TP_printk(
  18480. ++ "%s %s diag container type %hhu timestamp %u code %u len %d",
  18481. ++ __get_str(driver),
  18482. ++ __get_str(device),
  18483. ++ __entry->type,
  18484. ++ __entry->timestamp,
  18485. ++ __entry->code,
  18486. ++ __entry->len
  18487. ++ )
  18488. ++);
  18489. ++
  18490. ++TRACE_EVENT(ath10k_wmi_diag,
  18491. ++ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
  18492. ++
  18493. ++ TP_ARGS(ar, data, len),
  18494. ++
  18495. ++ TP_STRUCT__entry(
  18496. ++ __string(device, dev_name(ar->dev))
  18497. ++ __string(driver, dev_driver_string(ar->dev))
  18498. ++ __field(u16, len)
  18499. ++ __dynamic_array(u8, data, len)
  18500. ++ ),
  18501. ++
  18502. ++ TP_fast_assign(
  18503. ++ __assign_str(device, dev_name(ar->dev));
  18504. ++ __assign_str(driver, dev_driver_string(ar->dev));
  18505. ++ __entry->len = len;
  18506. ++ memcpy(__get_dynamic_array(data), data, len);
  18507. ++ ),
  18508. ++
  18509. ++ TP_printk(
  18510. ++ "%s %s tlv diag len %d",
  18511. ++ __get_str(driver),
  18512. ++ __get_str(device),
  18513. ++ __entry->len
  18514. ++ )
  18515. ++);
  18516. ++
  18517. + #endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
  18518. +
  18519. + /* we don't want to use include/trace/events */
  18520. +--- a/drivers/net/wireless/ath/ath10k/txrx.c
  18521. ++++ b/drivers/net/wireless/ath/ath10k/txrx.c
  18522. +@@ -32,14 +32,14 @@ static void ath10k_report_offchan_tx(str
  18523. + * offchan_tx_skb. */
  18524. + spin_lock_bh(&ar->data_lock);
  18525. + if (ar->offchan_tx_skb != skb) {
  18526. +- ath10k_warn("completed old offchannel frame\n");
  18527. ++ ath10k_warn(ar, "completed old offchannel frame\n");
  18528. + goto out;
  18529. + }
  18530. +
  18531. + complete(&ar->offchan_tx_completed);
  18532. + ar->offchan_tx_skb = NULL; /* just for sanity */
  18533. +
  18534. +- ath10k_dbg(ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb);
  18535. ++ ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb);
  18536. + out:
  18537. + spin_unlock_bh(&ar->data_lock);
  18538. + }
  18539. +@@ -47,23 +47,30 @@ out:
  18540. + void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
  18541. + const struct htt_tx_done *tx_done)
  18542. + {
  18543. +- struct device *dev = htt->ar->dev;
  18544. ++ struct ath10k *ar = htt->ar;
  18545. ++ struct device *dev = ar->dev;
  18546. + struct ieee80211_tx_info *info;
  18547. + struct ath10k_skb_cb *skb_cb;
  18548. + struct sk_buff *msdu;
  18549. +
  18550. + lockdep_assert_held(&htt->tx_lock);
  18551. +
  18552. +- ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
  18553. ++ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
  18554. + tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
  18555. +
  18556. + if (tx_done->msdu_id >= htt->max_num_pending_tx) {
  18557. +- ath10k_warn("warning: msdu_id %d too big, ignoring\n",
  18558. ++ ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n",
  18559. ++ tx_done->msdu_id);
  18560. ++ return;
  18561. ++ }
  18562. ++
  18563. ++ msdu = idr_find(&htt->pending_tx, tx_done->msdu_id);
  18564. ++ if (!msdu) {
  18565. ++ ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
  18566. + tx_done->msdu_id);
  18567. + return;
  18568. + }
  18569. +
  18570. +- msdu = htt->pending_tx[tx_done->msdu_id];
  18571. + skb_cb = ATH10K_SKB_CB(msdu);
  18572. +
  18573. + dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
  18574. +@@ -77,6 +84,7 @@ void ath10k_txrx_tx_unref(struct ath10k_
  18575. +
  18576. + info = IEEE80211_SKB_CB(msdu);
  18577. + memset(&info->status, 0, sizeof(info->status));
  18578. ++ trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id);
  18579. +
  18580. + if (tx_done->discard) {
  18581. + ieee80211_free_txskb(htt->ar->hw, msdu);
  18582. +@@ -93,7 +101,6 @@ void ath10k_txrx_tx_unref(struct ath10k_
  18583. + /* we do not own the msdu anymore */
  18584. +
  18585. + exit:
  18586. +- htt->pending_tx[tx_done->msdu_id] = NULL;
  18587. + ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
  18588. + __ath10k_htt_tx_dec_pending(htt);
  18589. + if (htt->num_pending_tx == 0)
  18590. +@@ -119,8 +126,7 @@ struct ath10k_peer *ath10k_peer_find(str
  18591. + return NULL;
  18592. + }
  18593. +
  18594. +-static struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar,
  18595. +- int peer_id)
  18596. ++struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id)
  18597. + {
  18598. + struct ath10k_peer *peer;
  18599. +
  18600. +@@ -145,7 +151,8 @@ static int ath10k_wait_for_peer_common(s
  18601. + mapped = !!ath10k_peer_find(ar, vdev_id, addr);
  18602. + spin_unlock_bh(&ar->data_lock);
  18603. +
  18604. +- mapped == expect_mapped;
  18605. ++ (mapped == expect_mapped ||
  18606. ++ test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags));
  18607. + }), 3*HZ);
  18608. +
  18609. + if (ret <= 0)
  18610. +@@ -178,12 +185,12 @@ void ath10k_peer_map_event(struct ath10k
  18611. + goto exit;
  18612. +
  18613. + peer->vdev_id = ev->vdev_id;
  18614. +- memcpy(peer->addr, ev->addr, ETH_ALEN);
  18615. ++ ether_addr_copy(peer->addr, ev->addr);
  18616. + list_add(&peer->list, &ar->peers);
  18617. + wake_up(&ar->peer_mapping_wq);
  18618. + }
  18619. +
  18620. +- ath10k_dbg(ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
  18621. ++ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
  18622. + ev->vdev_id, ev->addr, ev->peer_id);
  18623. +
  18624. + set_bit(ev->peer_id, peer->peer_ids);
  18625. +@@ -200,12 +207,12 @@ void ath10k_peer_unmap_event(struct ath1
  18626. + spin_lock_bh(&ar->data_lock);
  18627. + peer = ath10k_peer_find_by_id(ar, ev->peer_id);
  18628. + if (!peer) {
  18629. +- ath10k_warn("peer-unmap-event: unknown peer id %d\n",
  18630. ++ ath10k_warn(ar, "peer-unmap-event: unknown peer id %d\n",
  18631. + ev->peer_id);
  18632. + goto exit;
  18633. + }
  18634. +
  18635. +- ath10k_dbg(ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
  18636. ++ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
  18637. + peer->vdev_id, peer->addr, ev->peer_id);
  18638. +
  18639. + clear_bit(ev->peer_id, peer->peer_ids);
  18640. +--- a/drivers/net/wireless/ath/ath10k/txrx.h
  18641. ++++ b/drivers/net/wireless/ath/ath10k/txrx.h
  18642. +@@ -24,6 +24,7 @@ void ath10k_txrx_tx_unref(struct ath10k_
  18643. +
  18644. + struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
  18645. + const u8 *addr);
  18646. ++struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id);
  18647. + int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id,
  18648. + const u8 *addr);
  18649. + int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id,
  18650. +--- a/drivers/net/wireless/ath/ath10k/wmi.c
  18651. ++++ b/drivers/net/wireless/ath/ath10k/wmi.c
  18652. +@@ -22,7 +22,10 @@
  18653. + #include "htc.h"
  18654. + #include "debug.h"
  18655. + #include "wmi.h"
  18656. ++#include "wmi-tlv.h"
  18657. + #include "mac.h"
  18658. ++#include "testmode.h"
  18659. ++#include "wmi-ops.h"
  18660. +
  18661. + /* MAIN WMI cmd track */
  18662. + static struct wmi_cmd_map wmi_cmd_map = {
  18663. +@@ -142,6 +145,7 @@ static struct wmi_cmd_map wmi_cmd_map =
  18664. + .force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID,
  18665. + .gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
  18666. + .gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
  18667. ++ .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
  18668. + };
  18669. +
  18670. + /* 10.X WMI cmd track */
  18671. +@@ -264,6 +268,129 @@ static struct wmi_cmd_map wmi_10x_cmd_ma
  18672. + .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
  18673. + .gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
  18674. + .gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
  18675. ++ .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
  18676. ++};
  18677. ++
  18678. ++/* 10.2.4 WMI cmd track */
  18679. ++static struct wmi_cmd_map wmi_10_2_4_cmd_map = {
  18680. ++ .init_cmdid = WMI_10_2_INIT_CMDID,
  18681. ++ .start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
  18682. ++ .stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
  18683. ++ .scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
  18684. ++ .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
  18685. ++ .pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
  18686. ++ .pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
  18687. ++ .pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
  18688. ++ .pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
  18689. ++ .pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
  18690. ++ .pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
  18691. ++ .pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
  18692. ++ .pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
  18693. ++ .pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
  18694. ++ .pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
  18695. ++ .pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
  18696. ++ .pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
  18697. ++ .vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
  18698. ++ .vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
  18699. ++ .vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
  18700. ++ .vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
  18701. ++ .vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
  18702. ++ .vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
  18703. ++ .vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
  18704. ++ .vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
  18705. ++ .vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
  18706. ++ .peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
  18707. ++ .peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
  18708. ++ .peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
  18709. ++ .peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
  18710. ++ .peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
  18711. ++ .peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
  18712. ++ .peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
  18713. ++ .peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
  18714. ++ .bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
  18715. ++ .pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
  18716. ++ .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
  18717. ++ .bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
  18718. ++ .prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
  18719. ++ .mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
  18720. ++ .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
  18721. ++ .addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
  18722. ++ .addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
  18723. ++ .addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
  18724. ++ .delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
  18725. ++ .addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
  18726. ++ .send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
  18727. ++ .sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
  18728. ++ .sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
  18729. ++ .sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
  18730. ++ .pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
  18731. ++ .pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
  18732. ++ .roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
  18733. ++ .roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
  18734. ++ .roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
  18735. ++ .roam_scan_rssi_change_threshold =
  18736. ++ WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
  18737. ++ .roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
  18738. ++ .ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
  18739. ++ .ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
  18740. ++ .ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
  18741. ++ .p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
  18742. ++ .p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
  18743. ++ .p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
  18744. ++ .p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
  18745. ++ .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
  18746. ++ .ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
  18747. ++ .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
  18748. ++ .peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
  18749. ++ .wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
  18750. ++ .wlan_profile_set_hist_intvl_cmdid =
  18751. ++ WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
  18752. ++ .wlan_profile_get_profile_data_cmdid =
  18753. ++ WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
  18754. ++ .wlan_profile_enable_profile_id_cmdid =
  18755. ++ WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
  18756. ++ .wlan_profile_list_profile_id_cmdid =
  18757. ++ WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
  18758. ++ .pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
  18759. ++ .pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
  18760. ++ .add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
  18761. ++ .rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
  18762. ++ .wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
  18763. ++ .wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
  18764. ++ .wow_enable_disable_wake_event_cmdid =
  18765. ++ WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
  18766. ++ .wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
  18767. ++ .wow_hostwakeup_from_sleep_cmdid =
  18768. ++ WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
  18769. ++ .rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
  18770. ++ .rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
  18771. ++ .vdev_spectral_scan_configure_cmdid =
  18772. ++ WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
  18773. ++ .vdev_spectral_scan_enable_cmdid =
  18774. ++ WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
  18775. ++ .request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
  18776. ++ .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
  18777. ++ .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
  18778. ++ .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
  18779. ++ .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
  18780. ++ .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
  18781. ++ .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
  18782. ++ .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
  18783. ++ .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
  18784. ++ .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
  18785. ++ .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
  18786. ++ .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
  18787. ++ .echo_cmdid = WMI_10_2_ECHO_CMDID,
  18788. ++ .pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
  18789. ++ .dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
  18790. ++ .pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
  18791. ++ .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
  18792. ++ .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
  18793. ++ .vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
  18794. ++ .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
  18795. ++ .gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
  18796. ++ .gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
  18797. ++ .pdev_get_temperature_cmdid = WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
  18798. + };
  18799. +
  18800. + /* MAIN WMI VDEV param map */
  18801. +@@ -384,6 +511,64 @@ static struct wmi_vdev_param_map wmi_10x
  18802. + WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
  18803. + };
  18804. +
  18805. ++static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
  18806. ++ .rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
  18807. ++ .fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
  18808. ++ .beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
  18809. ++ .listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
  18810. ++ .multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
  18811. ++ .mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
  18812. ++ .slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
  18813. ++ .preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
  18814. ++ .swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
  18815. ++ .wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
  18816. ++ .wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
  18817. ++ .wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
  18818. ++ .dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
  18819. ++ .wmi_vdev_oc_scheduler_air_time_limit =
  18820. ++ WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
  18821. ++ .wds = WMI_10X_VDEV_PARAM_WDS,
  18822. ++ .atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
  18823. ++ .bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
  18824. ++ .bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
  18825. ++ .bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
  18826. ++ .feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
  18827. ++ .chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
  18828. ++ .chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
  18829. ++ .disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
  18830. ++ .sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
  18831. ++ .mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
  18832. ++ .protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
  18833. ++ .fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
  18834. ++ .sgi = WMI_10X_VDEV_PARAM_SGI,
  18835. ++ .ldpc = WMI_10X_VDEV_PARAM_LDPC,
  18836. ++ .tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
  18837. ++ .rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
  18838. ++ .intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
  18839. ++ .def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
  18840. ++ .nss = WMI_10X_VDEV_PARAM_NSS,
  18841. ++ .bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
  18842. ++ .mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
  18843. ++ .mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
  18844. ++ .dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
  18845. ++ .unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
  18846. ++ .ap_keepalive_min_idle_inactive_time_secs =
  18847. ++ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
  18848. ++ .ap_keepalive_max_idle_inactive_time_secs =
  18849. ++ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
  18850. ++ .ap_keepalive_max_unresponsive_time_secs =
  18851. ++ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
  18852. ++ .ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
  18853. ++ .mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
  18854. ++ .enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
  18855. ++ .txbf = WMI_VDEV_PARAM_UNSUPPORTED,
  18856. ++ .packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
  18857. ++ .drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
  18858. ++ .tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
  18859. ++ .ap_detect_out_of_sync_sleeping_sta_time_secs =
  18860. ++ WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
  18861. ++};
  18862. ++
  18863. + static struct wmi_pdev_param_map wmi_pdev_param_map = {
  18864. + .tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK,
  18865. + .rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK,
  18866. +@@ -433,6 +618,7 @@ static struct wmi_pdev_param_map wmi_pde
  18867. + .fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED,
  18868. + .burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
  18869. + .burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
  18870. ++ .cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
  18871. + };
  18872. +
  18873. + static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
  18874. +@@ -485,11 +671,221 @@ static struct wmi_pdev_param_map wmi_10x
  18875. + .fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
  18876. + .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
  18877. + .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
  18878. ++ .cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
  18879. ++};
  18880. ++
  18881. ++static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
  18882. ++ .tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
  18883. ++ .rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
  18884. ++ .txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
  18885. ++ .txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
  18886. ++ .txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
  18887. ++ .beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
  18888. ++ .beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
  18889. ++ .resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
  18890. ++ .protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
  18891. ++ .dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
  18892. ++ .non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
  18893. ++ .agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
  18894. ++ .sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
  18895. ++ .ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
  18896. ++ .ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
  18897. ++ .ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
  18898. ++ .ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
  18899. ++ .ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
  18900. ++ .ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
  18901. ++ .ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
  18902. ++ .ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
  18903. ++ .ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
  18904. ++ .ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
  18905. ++ .l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
  18906. ++ .dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
  18907. ++ .pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
  18908. ++ .pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
  18909. ++ .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
  18910. ++ .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
  18911. ++ .pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
  18912. ++ .vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
  18913. ++ .peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
  18914. ++ .bcnflt_stats_update_period =
  18915. ++ WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
  18916. ++ .pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
  18917. ++ .arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
  18918. ++ .dcs = WMI_10X_PDEV_PARAM_DCS,
  18919. ++ .ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
  18920. ++ .ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
  18921. ++ .ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
  18922. ++ .ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
  18923. ++ .ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
  18924. ++ .dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
  18925. ++ .proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
  18926. ++ .idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
  18927. ++ .power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
  18928. ++ .fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
  18929. ++ .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
  18930. ++ .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
  18931. ++ .cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
  18932. ++};
  18933. ++
  18934. ++/* firmware 10.2 specific mappings */
  18935. ++static struct wmi_cmd_map wmi_10_2_cmd_map = {
  18936. ++ .init_cmdid = WMI_10_2_INIT_CMDID,
  18937. ++ .start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
  18938. ++ .stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
  18939. ++ .scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
  18940. ++ .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
  18941. ++ .pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
  18942. ++ .pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
  18943. ++ .pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
  18944. ++ .pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
  18945. ++ .pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
  18946. ++ .pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
  18947. ++ .pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
  18948. ++ .pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
  18949. ++ .pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
  18950. ++ .pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
  18951. ++ .pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
  18952. ++ .pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
  18953. ++ .vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
  18954. ++ .vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
  18955. ++ .vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
  18956. ++ .vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
  18957. ++ .vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
  18958. ++ .vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
  18959. ++ .vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
  18960. ++ .vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
  18961. ++ .vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
  18962. ++ .peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
  18963. ++ .peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
  18964. ++ .peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
  18965. ++ .peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
  18966. ++ .peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
  18967. ++ .peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
  18968. ++ .peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
  18969. ++ .peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
  18970. ++ .bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
  18971. ++ .pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
  18972. ++ .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
  18973. ++ .bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
  18974. ++ .prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
  18975. ++ .mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
  18976. ++ .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
  18977. ++ .addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
  18978. ++ .addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
  18979. ++ .addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
  18980. ++ .delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
  18981. ++ .addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
  18982. ++ .send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
  18983. ++ .sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
  18984. ++ .sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
  18985. ++ .sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
  18986. ++ .pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
  18987. ++ .pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
  18988. ++ .roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
  18989. ++ .roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
  18990. ++ .roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
  18991. ++ .roam_scan_rssi_change_threshold =
  18992. ++ WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
  18993. ++ .roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
  18994. ++ .ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
  18995. ++ .ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
  18996. ++ .ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
  18997. ++ .p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
  18998. ++ .p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
  18999. ++ .p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
  19000. ++ .p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
  19001. ++ .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
  19002. ++ .ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
  19003. ++ .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
  19004. ++ .peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
  19005. ++ .wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
  19006. ++ .wlan_profile_set_hist_intvl_cmdid =
  19007. ++ WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
  19008. ++ .wlan_profile_get_profile_data_cmdid =
  19009. ++ WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
  19010. ++ .wlan_profile_enable_profile_id_cmdid =
  19011. ++ WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
  19012. ++ .wlan_profile_list_profile_id_cmdid =
  19013. ++ WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
  19014. ++ .pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
  19015. ++ .pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
  19016. ++ .add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
  19017. ++ .rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
  19018. ++ .wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
  19019. ++ .wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
  19020. ++ .wow_enable_disable_wake_event_cmdid =
  19021. ++ WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
  19022. ++ .wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
  19023. ++ .wow_hostwakeup_from_sleep_cmdid =
  19024. ++ WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
  19025. ++ .rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
  19026. ++ .rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
  19027. ++ .vdev_spectral_scan_configure_cmdid =
  19028. ++ WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
  19029. ++ .vdev_spectral_scan_enable_cmdid =
  19030. ++ WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
  19031. ++ .request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
  19032. ++ .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
  19033. ++ .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
  19034. ++ .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
  19035. ++ .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
  19036. ++ .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
  19037. ++ .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
  19038. ++ .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
  19039. ++ .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
  19040. ++ .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
  19041. ++ .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
  19042. ++ .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
  19043. ++ .echo_cmdid = WMI_10_2_ECHO_CMDID,
  19044. ++ .pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
  19045. ++ .dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
  19046. ++ .pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
  19047. ++ .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
  19048. ++ .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
  19049. ++ .vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
  19050. ++ .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
  19051. ++ .gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
  19052. ++ .gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
  19053. ++ .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
  19054. + };
  19055. +
  19056. ++void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
  19057. ++ const struct wmi_channel_arg *arg)
  19058. ++{
  19059. ++ u32 flags = 0;
  19060. ++
  19061. ++ memset(ch, 0, sizeof(*ch));
  19062. ++
  19063. ++ if (arg->passive)
  19064. ++ flags |= WMI_CHAN_FLAG_PASSIVE;
  19065. ++ if (arg->allow_ibss)
  19066. ++ flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
  19067. ++ if (arg->allow_ht)
  19068. ++ flags |= WMI_CHAN_FLAG_ALLOW_HT;
  19069. ++ if (arg->allow_vht)
  19070. ++ flags |= WMI_CHAN_FLAG_ALLOW_VHT;
  19071. ++ if (arg->ht40plus)
  19072. ++ flags |= WMI_CHAN_FLAG_HT40_PLUS;
  19073. ++ if (arg->chan_radar)
  19074. ++ flags |= WMI_CHAN_FLAG_DFS;
  19075. ++
  19076. ++ ch->mhz = __cpu_to_le32(arg->freq);
  19077. ++ ch->band_center_freq1 = __cpu_to_le32(arg->band_center_freq1);
  19078. ++ ch->band_center_freq2 = 0;
  19079. ++ ch->min_power = arg->min_power;
  19080. ++ ch->max_power = arg->max_power;
  19081. ++ ch->reg_power = arg->max_reg_power;
  19082. ++ ch->antenna_max = arg->max_antenna_gain;
  19083. ++
  19084. ++ /* mode & flags share storage */
  19085. ++ ch->mode = arg->mode;
  19086. ++ ch->flags |= __cpu_to_le32(flags);
  19087. ++}
  19088. ++
  19089. + int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
  19090. + {
  19091. + int ret;
  19092. ++
  19093. + ret = wait_for_completion_timeout(&ar->wmi.service_ready,
  19094. + WMI_SERVICE_READY_TIMEOUT_HZ);
  19095. + return ret;
  19096. +@@ -498,23 +894,24 @@ int ath10k_wmi_wait_for_service_ready(st
  19097. + int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
  19098. + {
  19099. + int ret;
  19100. ++
  19101. + ret = wait_for_completion_timeout(&ar->wmi.unified_ready,
  19102. + WMI_UNIFIED_READY_TIMEOUT_HZ);
  19103. + return ret;
  19104. + }
  19105. +
  19106. +-static struct sk_buff *ath10k_wmi_alloc_skb(u32 len)
  19107. ++struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len)
  19108. + {
  19109. + struct sk_buff *skb;
  19110. + u32 round_len = roundup(len, 4);
  19111. +
  19112. +- skb = ath10k_htc_alloc_skb(WMI_SKB_HEADROOM + round_len);
  19113. ++ skb = ath10k_htc_alloc_skb(ar, WMI_SKB_HEADROOM + round_len);
  19114. + if (!skb)
  19115. + return NULL;
  19116. +
  19117. + skb_reserve(skb, WMI_SKB_HEADROOM);
  19118. + if (!IS_ALIGNED((unsigned long)skb->data, 4))
  19119. +- ath10k_warn("Unaligned WMI skb\n");
  19120. ++ ath10k_warn(ar, "Unaligned WMI skb\n");
  19121. +
  19122. + skb_put(skb, round_len);
  19123. + memset(skb->data, 0, round_len);
  19124. +@@ -527,8 +924,8 @@ static void ath10k_wmi_htc_tx_complete(s
  19125. + dev_kfree_skb(skb);
  19126. + }
  19127. +
  19128. +-static int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
  19129. +- u32 cmd_id)
  19130. ++int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
  19131. ++ u32 cmd_id)
  19132. + {
  19133. + struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
  19134. + struct wmi_cmd_hdr *cmd_hdr;
  19135. +@@ -545,7 +942,7 @@ static int ath10k_wmi_cmd_send_nowait(st
  19136. +
  19137. + memset(skb_cb, 0, sizeof(*skb_cb));
  19138. + ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
  19139. +- trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len, ret);
  19140. ++ trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len, ret);
  19141. +
  19142. + if (ret)
  19143. + goto err_pull;
  19144. +@@ -559,23 +956,45 @@ err_pull:
  19145. +
  19146. + static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
  19147. + {
  19148. ++ struct ath10k *ar = arvif->ar;
  19149. ++ struct ath10k_skb_cb *cb;
  19150. ++ struct sk_buff *bcn;
  19151. + int ret;
  19152. +
  19153. +- lockdep_assert_held(&arvif->ar->data_lock);
  19154. ++ spin_lock_bh(&ar->data_lock);
  19155. +
  19156. +- if (arvif->beacon == NULL)
  19157. +- return;
  19158. ++ bcn = arvif->beacon;
  19159. +
  19160. +- if (arvif->beacon_sent)
  19161. +- return;
  19162. ++ if (!bcn)
  19163. ++ goto unlock;
  19164. +
  19165. +- ret = ath10k_wmi_beacon_send_ref_nowait(arvif);
  19166. +- if (ret)
  19167. +- return;
  19168. ++ cb = ATH10K_SKB_CB(bcn);
  19169. ++
  19170. ++ switch (arvif->beacon_state) {
  19171. ++ case ATH10K_BEACON_SENDING:
  19172. ++ case ATH10K_BEACON_SENT:
  19173. ++ break;
  19174. ++ case ATH10K_BEACON_SCHEDULED:
  19175. ++ arvif->beacon_state = ATH10K_BEACON_SENDING;
  19176. ++ spin_unlock_bh(&ar->data_lock);
  19177. ++
  19178. ++ ret = ath10k_wmi_beacon_send_ref_nowait(arvif->ar,
  19179. ++ arvif->vdev_id,
  19180. ++ bcn->data, bcn->len,
  19181. ++ cb->paddr,
  19182. ++ cb->bcn.dtim_zero,
  19183. ++ cb->bcn.deliver_cab);
  19184. ++
  19185. ++ spin_lock_bh(&ar->data_lock);
  19186. +
  19187. +- /* We need to retain the arvif->beacon reference for DMA unmapping and
  19188. +- * freeing the skbuff later. */
  19189. +- arvif->beacon_sent = true;
  19190. ++ if (ret == 0)
  19191. ++ arvif->beacon_state = ATH10K_BEACON_SENT;
  19192. ++ else
  19193. ++ arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
  19194. ++ }
  19195. ++
  19196. ++unlock:
  19197. ++ spin_unlock_bh(&ar->data_lock);
  19198. + }
  19199. +
  19200. + static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
  19201. +@@ -588,12 +1007,10 @@ static void ath10k_wmi_tx_beacons_iter(v
  19202. +
  19203. + static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
  19204. + {
  19205. +- spin_lock_bh(&ar->data_lock);
  19206. + ieee80211_iterate_active_interfaces_atomic(ar->hw,
  19207. + IEEE80211_IFACE_ITER_NORMAL,
  19208. + ath10k_wmi_tx_beacons_iter,
  19209. + NULL);
  19210. +- spin_unlock_bh(&ar->data_lock);
  19211. + }
  19212. +
  19213. + static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
  19214. +@@ -604,15 +1021,14 @@ static void ath10k_wmi_op_ep_tx_credits(
  19215. + wake_up(&ar->wmi.tx_credits_wq);
  19216. + }
  19217. +
  19218. +-static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
  19219. +- u32 cmd_id)
  19220. ++int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
  19221. + {
  19222. + int ret = -EOPNOTSUPP;
  19223. +
  19224. + might_sleep();
  19225. +
  19226. + if (cmd_id == WMI_CMD_UNSUPPORTED) {
  19227. +- ath10k_warn("wmi command %d is not supported by firmware\n",
  19228. ++ ath10k_warn(ar, "wmi command %d is not supported by firmware\n",
  19229. + cmd_id);
  19230. + return ret;
  19231. + }
  19232. +@@ -622,6 +1038,10 @@ static int ath10k_wmi_cmd_send(struct at
  19233. + ath10k_wmi_tx_beacons_nowait(ar);
  19234. +
  19235. + ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);
  19236. ++
  19237. ++ if (ret && test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
  19238. ++ ret = -ESHUTDOWN;
  19239. ++
  19240. + (ret != -EAGAIN);
  19241. + }), 3*HZ);
  19242. +
  19243. +@@ -631,147 +1051,270 @@ static int ath10k_wmi_cmd_send(struct at
  19244. + return ret;
  19245. + }
  19246. +
  19247. +-int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
  19248. ++static struct sk_buff *
  19249. ++ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
  19250. + {
  19251. +- int ret = 0;
  19252. + struct wmi_mgmt_tx_cmd *cmd;
  19253. + struct ieee80211_hdr *hdr;
  19254. +- struct sk_buff *wmi_skb;
  19255. +- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
  19256. ++ struct sk_buff *skb;
  19257. + int len;
  19258. ++ u32 buf_len = msdu->len;
  19259. + u16 fc;
  19260. +
  19261. +- hdr = (struct ieee80211_hdr *)skb->data;
  19262. ++ hdr = (struct ieee80211_hdr *)msdu->data;
  19263. + fc = le16_to_cpu(hdr->frame_control);
  19264. +
  19265. + if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
  19266. +- return -EINVAL;
  19267. ++ return ERR_PTR(-EINVAL);
  19268. ++
  19269. ++ len = sizeof(cmd->hdr) + msdu->len;
  19270. ++
  19271. ++ if ((ieee80211_is_action(hdr->frame_control) ||
  19272. ++ ieee80211_is_deauth(hdr->frame_control) ||
  19273. ++ ieee80211_is_disassoc(hdr->frame_control)) &&
  19274. ++ ieee80211_has_protected(hdr->frame_control)) {
  19275. ++ len += IEEE80211_CCMP_MIC_LEN;
  19276. ++ buf_len += IEEE80211_CCMP_MIC_LEN;
  19277. ++ }
  19278. +
  19279. +- len = sizeof(cmd->hdr) + skb->len;
  19280. + len = round_up(len, 4);
  19281. +
  19282. +- wmi_skb = ath10k_wmi_alloc_skb(len);
  19283. +- if (!wmi_skb)
  19284. +- return -ENOMEM;
  19285. ++ skb = ath10k_wmi_alloc_skb(ar, len);
  19286. ++ if (!skb)
  19287. ++ return ERR_PTR(-ENOMEM);
  19288. +
  19289. +- cmd = (struct wmi_mgmt_tx_cmd *)wmi_skb->data;
  19290. ++ cmd = (struct wmi_mgmt_tx_cmd *)skb->data;
  19291. +
  19292. +- cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(skb)->vdev_id);
  19293. ++ cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(msdu)->vdev_id);
  19294. + cmd->hdr.tx_rate = 0;
  19295. + cmd->hdr.tx_power = 0;
  19296. +- cmd->hdr.buf_len = __cpu_to_le32((u32)(skb->len));
  19297. ++ cmd->hdr.buf_len = __cpu_to_le32(buf_len);
  19298. +
  19299. +- memcpy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr), ETH_ALEN);
  19300. +- memcpy(cmd->buf, skb->data, skb->len);
  19301. ++ ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr));
  19302. ++ memcpy(cmd->buf, msdu->data, msdu->len);
  19303. +
  19304. +- ath10k_dbg(ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
  19305. +- wmi_skb, wmi_skb->len, fc & IEEE80211_FCTL_FTYPE,
  19306. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
  19307. ++ msdu, skb->len, fc & IEEE80211_FCTL_FTYPE,
  19308. + fc & IEEE80211_FCTL_STYPE);
  19309. ++ trace_ath10k_tx_hdr(ar, skb->data, skb->len);
  19310. ++ trace_ath10k_tx_payload(ar, skb->data, skb->len);
  19311. +
  19312. +- /* Send the management frame buffer to the target */
  19313. +- ret = ath10k_wmi_cmd_send(ar, wmi_skb, ar->wmi.cmd->mgmt_tx_cmdid);
  19314. +- if (ret)
  19315. +- return ret;
  19316. +-
  19317. +- /* TODO: report tx status to mac80211 - temporary just ACK */
  19318. +- info->flags |= IEEE80211_TX_STAT_ACK;
  19319. +- ieee80211_tx_status_irqsafe(ar->hw, skb);
  19320. +-
  19321. +- return ret;
  19322. ++ return skb;
  19323. + }
  19324. +
  19325. +-static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
  19326. ++static void ath10k_wmi_event_scan_started(struct ath10k *ar)
  19327. + {
  19328. +- struct wmi_scan_event *event = (struct wmi_scan_event *)skb->data;
  19329. +- enum wmi_scan_event_type event_type;
  19330. +- enum wmi_scan_completion_reason reason;
  19331. +- u32 freq;
  19332. +- u32 req_id;
  19333. +- u32 scan_id;
  19334. +- u32 vdev_id;
  19335. +-
  19336. +- event_type = __le32_to_cpu(event->event_type);
  19337. +- reason = __le32_to_cpu(event->reason);
  19338. +- freq = __le32_to_cpu(event->channel_freq);
  19339. +- req_id = __le32_to_cpu(event->scan_req_id);
  19340. +- scan_id = __le32_to_cpu(event->scan_id);
  19341. +- vdev_id = __le32_to_cpu(event->vdev_id);
  19342. +-
  19343. +- ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENTID\n");
  19344. +- ath10k_dbg(ATH10K_DBG_WMI,
  19345. +- "scan event type %d reason %d freq %d req_id %d "
  19346. +- "scan_id %d vdev_id %d\n",
  19347. +- event_type, reason, freq, req_id, scan_id, vdev_id);
  19348. ++ lockdep_assert_held(&ar->data_lock);
  19349. +
  19350. +- spin_lock_bh(&ar->data_lock);
  19351. ++ switch (ar->scan.state) {
  19352. ++ case ATH10K_SCAN_IDLE:
  19353. ++ case ATH10K_SCAN_RUNNING:
  19354. ++ case ATH10K_SCAN_ABORTING:
  19355. ++ ath10k_warn(ar, "received scan started event in an invalid scan state: %s (%d)\n",
  19356. ++ ath10k_scan_state_str(ar->scan.state),
  19357. ++ ar->scan.state);
  19358. ++ break;
  19359. ++ case ATH10K_SCAN_STARTING:
  19360. ++ ar->scan.state = ATH10K_SCAN_RUNNING;
  19361. +
  19362. +- switch (event_type) {
  19363. +- case WMI_SCAN_EVENT_STARTED:
  19364. +- ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_STARTED\n");
  19365. +- if (ar->scan.in_progress && ar->scan.is_roc)
  19366. ++ if (ar->scan.is_roc)
  19367. + ieee80211_ready_on_channel(ar->hw);
  19368. +
  19369. + complete(&ar->scan.started);
  19370. + break;
  19371. +- case WMI_SCAN_EVENT_COMPLETED:
  19372. +- ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_COMPLETED\n");
  19373. +- switch (reason) {
  19374. +- case WMI_SCAN_REASON_COMPLETED:
  19375. +- ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_COMPLETED\n");
  19376. +- break;
  19377. +- case WMI_SCAN_REASON_CANCELLED:
  19378. +- ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_CANCELED\n");
  19379. +- break;
  19380. +- case WMI_SCAN_REASON_PREEMPTED:
  19381. +- ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_PREEMPTED\n");
  19382. +- break;
  19383. +- case WMI_SCAN_REASON_TIMEDOUT:
  19384. +- ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_TIMEDOUT\n");
  19385. +- break;
  19386. +- default:
  19387. +- break;
  19388. +- }
  19389. +-
  19390. +- ar->scan_channel = NULL;
  19391. +- if (!ar->scan.in_progress) {
  19392. +- ath10k_warn("no scan requested, ignoring\n");
  19393. +- break;
  19394. +- }
  19395. +-
  19396. +- if (ar->scan.is_roc) {
  19397. +- ath10k_offchan_tx_purge(ar);
  19398. ++ }
  19399. ++}
  19400. +
  19401. +- if (!ar->scan.aborting)
  19402. +- ieee80211_remain_on_channel_expired(ar->hw);
  19403. +- } else {
  19404. +- ieee80211_scan_completed(ar->hw, ar->scan.aborting);
  19405. +- }
  19406. ++static void ath10k_wmi_event_scan_start_failed(struct ath10k *ar)
  19407. ++{
  19408. ++ lockdep_assert_held(&ar->data_lock);
  19409. +
  19410. +- del_timer(&ar->scan.timeout);
  19411. +- complete_all(&ar->scan.completed);
  19412. +- ar->scan.in_progress = false;
  19413. ++ switch (ar->scan.state) {
  19414. ++ case ATH10K_SCAN_IDLE:
  19415. ++ case ATH10K_SCAN_RUNNING:
  19416. ++ case ATH10K_SCAN_ABORTING:
  19417. ++ ath10k_warn(ar, "received scan start failed event in an invalid scan state: %s (%d)\n",
  19418. ++ ath10k_scan_state_str(ar->scan.state),
  19419. ++ ar->scan.state);
  19420. + break;
  19421. +- case WMI_SCAN_EVENT_BSS_CHANNEL:
  19422. +- ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_BSS_CHANNEL\n");
  19423. +- ar->scan_channel = NULL;
  19424. ++ case ATH10K_SCAN_STARTING:
  19425. ++ complete(&ar->scan.started);
  19426. ++ __ath10k_scan_finish(ar);
  19427. + break;
  19428. +- case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
  19429. +- ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_FOREIGN_CHANNEL\n");
  19430. ++ }
  19431. ++}
  19432. ++
  19433. ++static void ath10k_wmi_event_scan_completed(struct ath10k *ar)
  19434. ++{
  19435. ++ lockdep_assert_held(&ar->data_lock);
  19436. ++
  19437. ++ switch (ar->scan.state) {
  19438. ++ case ATH10K_SCAN_IDLE:
  19439. ++ case ATH10K_SCAN_STARTING:
  19440. ++ /* One suspected reason scan can be completed while starting is
  19441. ++ * if firmware fails to deliver all scan events to the host,
  19442. ++ * e.g. when transport pipe is full. This has been observed
  19443. ++ * with spectral scan phyerr events starving wmi transport
  19444. ++ * pipe. In such case the "scan completed" event should be (and
  19445. ++ * is) ignored by the host as it may be just firmware's scan
  19446. ++ * state machine recovering.
  19447. ++ */
  19448. ++ ath10k_warn(ar, "received scan completed event in an invalid scan state: %s (%d)\n",
  19449. ++ ath10k_scan_state_str(ar->scan.state),
  19450. ++ ar->scan.state);
  19451. ++ break;
  19452. ++ case ATH10K_SCAN_RUNNING:
  19453. ++ case ATH10K_SCAN_ABORTING:
  19454. ++ __ath10k_scan_finish(ar);
  19455. ++ break;
  19456. ++ }
  19457. ++}
  19458. ++
  19459. ++static void ath10k_wmi_event_scan_bss_chan(struct ath10k *ar)
  19460. ++{
  19461. ++ lockdep_assert_held(&ar->data_lock);
  19462. ++
  19463. ++ switch (ar->scan.state) {
  19464. ++ case ATH10K_SCAN_IDLE:
  19465. ++ case ATH10K_SCAN_STARTING:
  19466. ++ ath10k_warn(ar, "received scan bss chan event in an invalid scan state: %s (%d)\n",
  19467. ++ ath10k_scan_state_str(ar->scan.state),
  19468. ++ ar->scan.state);
  19469. ++ break;
  19470. ++ case ATH10K_SCAN_RUNNING:
  19471. ++ case ATH10K_SCAN_ABORTING:
  19472. ++ ar->scan_channel = NULL;
  19473. ++ break;
  19474. ++ }
  19475. ++}
  19476. ++
  19477. ++static void ath10k_wmi_event_scan_foreign_chan(struct ath10k *ar, u32 freq)
  19478. ++{
  19479. ++ lockdep_assert_held(&ar->data_lock);
  19480. ++
  19481. ++ switch (ar->scan.state) {
  19482. ++ case ATH10K_SCAN_IDLE:
  19483. ++ case ATH10K_SCAN_STARTING:
  19484. ++ ath10k_warn(ar, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
  19485. ++ ath10k_scan_state_str(ar->scan.state),
  19486. ++ ar->scan.state);
  19487. ++ break;
  19488. ++ case ATH10K_SCAN_RUNNING:
  19489. ++ case ATH10K_SCAN_ABORTING:
  19490. + ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
  19491. +- if (ar->scan.in_progress && ar->scan.is_roc &&
  19492. +- ar->scan.roc_freq == freq) {
  19493. ++
  19494. ++ if (ar->scan.is_roc && ar->scan.roc_freq == freq)
  19495. + complete(&ar->scan.on_channel);
  19496. +- }
  19497. + break;
  19498. ++ }
  19499. ++}
  19500. ++
  19501. ++static const char *
  19502. ++ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
  19503. ++ enum wmi_scan_completion_reason reason)
  19504. ++{
  19505. ++ switch (type) {
  19506. ++ case WMI_SCAN_EVENT_STARTED:
  19507. ++ return "started";
  19508. ++ case WMI_SCAN_EVENT_COMPLETED:
  19509. ++ switch (reason) {
  19510. ++ case WMI_SCAN_REASON_COMPLETED:
  19511. ++ return "completed";
  19512. ++ case WMI_SCAN_REASON_CANCELLED:
  19513. ++ return "completed [cancelled]";
  19514. ++ case WMI_SCAN_REASON_PREEMPTED:
  19515. ++ return "completed [preempted]";
  19516. ++ case WMI_SCAN_REASON_TIMEDOUT:
  19517. ++ return "completed [timedout]";
  19518. ++ case WMI_SCAN_REASON_MAX:
  19519. ++ break;
  19520. ++ }
  19521. ++ return "completed [unknown]";
  19522. ++ case WMI_SCAN_EVENT_BSS_CHANNEL:
  19523. ++ return "bss channel";
  19524. ++ case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
  19525. ++ return "foreign channel";
  19526. + case WMI_SCAN_EVENT_DEQUEUED:
  19527. +- ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_DEQUEUED\n");
  19528. +- break;
  19529. ++ return "dequeued";
  19530. + case WMI_SCAN_EVENT_PREEMPTED:
  19531. +- ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_PREEMPTED\n");
  19532. ++ return "preempted";
  19533. ++ case WMI_SCAN_EVENT_START_FAILED:
  19534. ++ return "start failed";
  19535. ++ default:
  19536. ++ return "unknown";
  19537. ++ }
  19538. ++}
  19539. ++
  19540. ++static int ath10k_wmi_op_pull_scan_ev(struct ath10k *ar, struct sk_buff *skb,
  19541. ++ struct wmi_scan_ev_arg *arg)
  19542. ++{
  19543. ++ struct wmi_scan_event *ev = (void *)skb->data;
  19544. ++
  19545. ++ if (skb->len < sizeof(*ev))
  19546. ++ return -EPROTO;
  19547. ++
  19548. ++ skb_pull(skb, sizeof(*ev));
  19549. ++ arg->event_type = ev->event_type;
  19550. ++ arg->reason = ev->reason;
  19551. ++ arg->channel_freq = ev->channel_freq;
  19552. ++ arg->scan_req_id = ev->scan_req_id;
  19553. ++ arg->scan_id = ev->scan_id;
  19554. ++ arg->vdev_id = ev->vdev_id;
  19555. ++
  19556. ++ return 0;
  19557. ++}
  19558. ++
  19559. ++int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
  19560. ++{
  19561. ++ struct wmi_scan_ev_arg arg = {};
  19562. ++ enum wmi_scan_event_type event_type;
  19563. ++ enum wmi_scan_completion_reason reason;
  19564. ++ u32 freq;
  19565. ++ u32 req_id;
  19566. ++ u32 scan_id;
  19567. ++ u32 vdev_id;
  19568. ++ int ret;
  19569. ++
  19570. ++ ret = ath10k_wmi_pull_scan(ar, skb, &arg);
  19571. ++ if (ret) {
  19572. ++ ath10k_warn(ar, "failed to parse scan event: %d\n", ret);
  19573. ++ return ret;
  19574. ++ }
  19575. ++
  19576. ++ event_type = __le32_to_cpu(arg.event_type);
  19577. ++ reason = __le32_to_cpu(arg.reason);
  19578. ++ freq = __le32_to_cpu(arg.channel_freq);
  19579. ++ req_id = __le32_to_cpu(arg.scan_req_id);
  19580. ++ scan_id = __le32_to_cpu(arg.scan_id);
  19581. ++ vdev_id = __le32_to_cpu(arg.vdev_id);
  19582. ++
  19583. ++ spin_lock_bh(&ar->data_lock);
  19584. ++
  19585. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  19586. ++ "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
  19587. ++ ath10k_wmi_event_scan_type_str(event_type, reason),
  19588. ++ event_type, reason, freq, req_id, scan_id, vdev_id,
  19589. ++ ath10k_scan_state_str(ar->scan.state), ar->scan.state);
  19590. ++
  19591. ++ switch (event_type) {
  19592. ++ case WMI_SCAN_EVENT_STARTED:
  19593. ++ ath10k_wmi_event_scan_started(ar);
  19594. ++ break;
  19595. ++ case WMI_SCAN_EVENT_COMPLETED:
  19596. ++ ath10k_wmi_event_scan_completed(ar);
  19597. ++ break;
  19598. ++ case WMI_SCAN_EVENT_BSS_CHANNEL:
  19599. ++ ath10k_wmi_event_scan_bss_chan(ar);
  19600. ++ break;
  19601. ++ case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
  19602. ++ ath10k_wmi_event_scan_foreign_chan(ar, freq);
  19603. + break;
  19604. + case WMI_SCAN_EVENT_START_FAILED:
  19605. +- ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_START_FAILED\n");
  19606. ++ ath10k_warn(ar, "received scan start failure event\n");
  19607. ++ ath10k_wmi_event_scan_start_failed(ar);
  19608. + break;
  19609. ++ case WMI_SCAN_EVENT_DEQUEUED:
  19610. ++ case WMI_SCAN_EVENT_PREEMPTED:
  19611. + default:
  19612. + break;
  19613. + }
  19614. +@@ -865,13 +1408,86 @@ static inline u8 get_rate_idx(u32 rate,
  19615. + return rate_idx;
  19616. + }
  19617. +
  19618. +-static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
  19619. ++/* If keys are configured, HW decrypts all frames
  19620. ++ * with protected bit set. Mark such frames as decrypted.
  19621. ++ */
  19622. ++static void ath10k_wmi_handle_wep_reauth(struct ath10k *ar,
  19623. ++ struct sk_buff *skb,
  19624. ++ struct ieee80211_rx_status *status)
  19625. ++{
  19626. ++ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
  19627. ++ unsigned int hdrlen;
  19628. ++ bool peer_key;
  19629. ++ u8 *addr, keyidx;
  19630. ++
  19631. ++ if (!ieee80211_is_auth(hdr->frame_control) ||
  19632. ++ !ieee80211_has_protected(hdr->frame_control))
  19633. ++ return;
  19634. ++
  19635. ++ hdrlen = ieee80211_hdrlen(hdr->frame_control);
  19636. ++ if (skb->len < (hdrlen + IEEE80211_WEP_IV_LEN))
  19637. ++ return;
  19638. ++
  19639. ++ keyidx = skb->data[hdrlen + (IEEE80211_WEP_IV_LEN - 1)] >> WEP_KEYID_SHIFT;
  19640. ++ addr = ieee80211_get_SA(hdr);
  19641. ++
  19642. ++ spin_lock_bh(&ar->data_lock);
  19643. ++ peer_key = ath10k_mac_is_peer_wep_key_set(ar, addr, keyidx);
  19644. ++ spin_unlock_bh(&ar->data_lock);
  19645. ++
  19646. ++ if (peer_key) {
  19647. ++ ath10k_dbg(ar, ATH10K_DBG_MAC,
  19648. ++ "mac wep key present for peer %pM\n", addr);
  19649. ++ status->flag |= RX_FLAG_DECRYPTED;
  19650. ++ }
  19651. ++}
  19652. ++
  19653. ++static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
  19654. ++ struct wmi_mgmt_rx_ev_arg *arg)
  19655. + {
  19656. + struct wmi_mgmt_rx_event_v1 *ev_v1;
  19657. + struct wmi_mgmt_rx_event_v2 *ev_v2;
  19658. + struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
  19659. ++ size_t pull_len;
  19660. ++ u32 msdu_len;
  19661. ++
  19662. ++ if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
  19663. ++ ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
  19664. ++ ev_hdr = &ev_v2->hdr.v1;
  19665. ++ pull_len = sizeof(*ev_v2);
  19666. ++ } else {
  19667. ++ ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
  19668. ++ ev_hdr = &ev_v1->hdr;
  19669. ++ pull_len = sizeof(*ev_v1);
  19670. ++ }
  19671. ++
  19672. ++ if (skb->len < pull_len)
  19673. ++ return -EPROTO;
  19674. ++
  19675. ++ skb_pull(skb, pull_len);
  19676. ++ arg->channel = ev_hdr->channel;
  19677. ++ arg->buf_len = ev_hdr->buf_len;
  19678. ++ arg->status = ev_hdr->status;
  19679. ++ arg->snr = ev_hdr->snr;
  19680. ++ arg->phy_mode = ev_hdr->phy_mode;
  19681. ++ arg->rate = ev_hdr->rate;
  19682. ++
  19683. ++ msdu_len = __le32_to_cpu(arg->buf_len);
  19684. ++ if (skb->len < msdu_len)
  19685. ++ return -EPROTO;
  19686. ++
  19687. ++ /* the WMI buffer might've ended up being padded to 4 bytes due to HTC
  19688. ++ * trailer with credit update. Trim the excess garbage.
  19689. ++ */
  19690. ++ skb_trim(skb, msdu_len);
  19691. ++
  19692. ++ return 0;
  19693. ++}
  19694. ++
  19695. ++int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
  19696. ++{
  19697. ++ struct wmi_mgmt_rx_ev_arg arg = {};
  19698. + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
  19699. +- struct ieee80211_channel *ch;
  19700. + struct ieee80211_hdr *hdr;
  19701. + u32 rx_status;
  19702. + u32 channel;
  19703. +@@ -880,28 +1496,24 @@ static int ath10k_wmi_event_mgmt_rx(stru
  19704. + u32 rate;
  19705. + u32 buf_len;
  19706. + u16 fc;
  19707. +- int pull_len;
  19708. ++ int ret;
  19709. +
  19710. +- if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
  19711. +- ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
  19712. +- ev_hdr = &ev_v2->hdr.v1;
  19713. +- pull_len = sizeof(*ev_v2);
  19714. +- } else {
  19715. +- ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
  19716. +- ev_hdr = &ev_v1->hdr;
  19717. +- pull_len = sizeof(*ev_v1);
  19718. ++ ret = ath10k_wmi_pull_mgmt_rx(ar, skb, &arg);
  19719. ++ if (ret) {
  19720. ++ ath10k_warn(ar, "failed to parse mgmt rx event: %d\n", ret);
  19721. ++ return ret;
  19722. + }
  19723. +
  19724. +- channel = __le32_to_cpu(ev_hdr->channel);
  19725. +- buf_len = __le32_to_cpu(ev_hdr->buf_len);
  19726. +- rx_status = __le32_to_cpu(ev_hdr->status);
  19727. +- snr = __le32_to_cpu(ev_hdr->snr);
  19728. +- phy_mode = __le32_to_cpu(ev_hdr->phy_mode);
  19729. +- rate = __le32_to_cpu(ev_hdr->rate);
  19730. ++ channel = __le32_to_cpu(arg.channel);
  19731. ++ buf_len = __le32_to_cpu(arg.buf_len);
  19732. ++ rx_status = __le32_to_cpu(arg.status);
  19733. ++ snr = __le32_to_cpu(arg.snr);
  19734. ++ phy_mode = __le32_to_cpu(arg.phy_mode);
  19735. ++ rate = __le32_to_cpu(arg.rate);
  19736. +
  19737. + memset(status, 0, sizeof(*status));
  19738. +
  19739. +- ath10k_dbg(ATH10K_DBG_MGMT,
  19740. ++ ath10k_dbg(ar, ATH10K_DBG_MGMT,
  19741. + "event mgmt rx status %08x\n", rx_status);
  19742. +
  19743. + if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
  19744. +@@ -919,66 +1531,70 @@ static int ath10k_wmi_event_mgmt_rx(stru
  19745. + return 0;
  19746. + }
  19747. +
  19748. +- if (rx_status & WMI_RX_STATUS_ERR_CRC)
  19749. +- status->flag |= RX_FLAG_FAILED_FCS_CRC;
  19750. ++ if (rx_status & WMI_RX_STATUS_ERR_CRC) {
  19751. ++ dev_kfree_skb(skb);
  19752. ++ return 0;
  19753. ++ }
  19754. ++
  19755. + if (rx_status & WMI_RX_STATUS_ERR_MIC)
  19756. + status->flag |= RX_FLAG_MMIC_ERROR;
  19757. +
  19758. +- /* HW can Rx CCK rates on 5GHz. In that case phy_mode is set to
  19759. ++ /* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to
  19760. + * MODE_11B. This means phy_mode is not a reliable source for the band
  19761. +- * of mgmt rx. */
  19762. +-
  19763. +- ch = ar->scan_channel;
  19764. +- if (!ch)
  19765. +- ch = ar->rx_channel;
  19766. +-
  19767. +- if (ch) {
  19768. +- status->band = ch->band;
  19769. +-
  19770. +- if (phy_mode == MODE_11B &&
  19771. +- status->band == IEEE80211_BAND_5GHZ)
  19772. +- ath10k_dbg(ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
  19773. ++ * of mgmt rx.
  19774. ++ */
  19775. ++ if (channel >= 1 && channel <= 14) {
  19776. ++ status->band = IEEE80211_BAND_2GHZ;
  19777. ++ } else if (channel >= 36 && channel <= 165) {
  19778. ++ status->band = IEEE80211_BAND_5GHZ;
  19779. + } else {
  19780. +- ath10k_warn("using (unreliable) phy_mode to extract band for mgmt rx\n");
  19781. +- status->band = phy_mode_to_band(phy_mode);
  19782. ++ /* Shouldn't happen unless list of advertised channels to
  19783. ++ * mac80211 has been changed.
  19784. ++ */
  19785. ++ WARN_ON_ONCE(1);
  19786. ++ dev_kfree_skb(skb);
  19787. ++ return 0;
  19788. + }
  19789. +
  19790. ++ if (phy_mode == MODE_11B && status->band == IEEE80211_BAND_5GHZ)
  19791. ++ ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
  19792. ++
  19793. + status->freq = ieee80211_channel_to_frequency(channel, status->band);
  19794. + status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
  19795. + status->rate_idx = get_rate_idx(rate, status->band);
  19796. +
  19797. +- skb_pull(skb, pull_len);
  19798. +-
  19799. + hdr = (struct ieee80211_hdr *)skb->data;
  19800. + fc = le16_to_cpu(hdr->frame_control);
  19801. +
  19802. ++ ath10k_wmi_handle_wep_reauth(ar, skb, status);
  19803. ++
  19804. + /* FW delivers WEP Shared Auth frame with Protected Bit set and
  19805. + * encrypted payload. However in case of PMF it delivers decrypted
  19806. + * frames with Protected Bit set. */
  19807. + if (ieee80211_has_protected(hdr->frame_control) &&
  19808. + !ieee80211_is_auth(hdr->frame_control)) {
  19809. +- status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED |
  19810. +- RX_FLAG_MMIC_STRIPPED;
  19811. +- hdr->frame_control = __cpu_to_le16(fc &
  19812. ++ status->flag |= RX_FLAG_DECRYPTED;
  19813. ++
  19814. ++ if (!ieee80211_is_action(hdr->frame_control) &&
  19815. ++ !ieee80211_is_deauth(hdr->frame_control) &&
  19816. ++ !ieee80211_is_disassoc(hdr->frame_control)) {
  19817. ++ status->flag |= RX_FLAG_IV_STRIPPED |
  19818. ++ RX_FLAG_MMIC_STRIPPED;
  19819. ++ hdr->frame_control = __cpu_to_le16(fc &
  19820. + ~IEEE80211_FCTL_PROTECTED);
  19821. ++ }
  19822. + }
  19823. +
  19824. +- ath10k_dbg(ATH10K_DBG_MGMT,
  19825. ++ ath10k_dbg(ar, ATH10K_DBG_MGMT,
  19826. + "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
  19827. + skb, skb->len,
  19828. + fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
  19829. +
  19830. +- ath10k_dbg(ATH10K_DBG_MGMT,
  19831. ++ ath10k_dbg(ar, ATH10K_DBG_MGMT,
  19832. + "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
  19833. + status->freq, status->band, status->signal,
  19834. + status->rate_idx);
  19835. +
  19836. +- /*
  19837. +- * packets from HTC come aligned to 4byte boundaries
  19838. +- * because they can originally come in along with a trailer
  19839. +- */
  19840. +- skb_trim(skb, buf_len);
  19841. +-
  19842. + ieee80211_rx(ar->hw, skb);
  19843. + return 0;
  19844. + }
  19845. +@@ -1002,37 +1618,65 @@ exit:
  19846. + return idx;
  19847. + }
  19848. +
  19849. +-static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
  19850. ++static int ath10k_wmi_op_pull_ch_info_ev(struct ath10k *ar, struct sk_buff *skb,
  19851. ++ struct wmi_ch_info_ev_arg *arg)
  19852. ++{
  19853. ++ struct wmi_chan_info_event *ev = (void *)skb->data;
  19854. ++
  19855. ++ if (skb->len < sizeof(*ev))
  19856. ++ return -EPROTO;
  19857. ++
  19858. ++ skb_pull(skb, sizeof(*ev));
  19859. ++ arg->err_code = ev->err_code;
  19860. ++ arg->freq = ev->freq;
  19861. ++ arg->cmd_flags = ev->cmd_flags;
  19862. ++ arg->noise_floor = ev->noise_floor;
  19863. ++ arg->rx_clear_count = ev->rx_clear_count;
  19864. ++ arg->cycle_count = ev->cycle_count;
  19865. ++
  19866. ++ return 0;
  19867. ++}
  19868. ++
  19869. ++void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
  19870. + {
  19871. +- struct wmi_chan_info_event *ev;
  19872. ++ struct wmi_ch_info_ev_arg arg = {};
  19873. + struct survey_info *survey;
  19874. + u32 err_code, freq, cmd_flags, noise_floor, rx_clear_count, cycle_count;
  19875. +- int idx;
  19876. ++ int idx, ret;
  19877. +
  19878. +- ev = (struct wmi_chan_info_event *)skb->data;
  19879. ++ ret = ath10k_wmi_pull_ch_info(ar, skb, &arg);
  19880. ++ if (ret) {
  19881. ++ ath10k_warn(ar, "failed to parse chan info event: %d\n", ret);
  19882. ++ return;
  19883. ++ }
  19884. +
  19885. +- err_code = __le32_to_cpu(ev->err_code);
  19886. +- freq = __le32_to_cpu(ev->freq);
  19887. +- cmd_flags = __le32_to_cpu(ev->cmd_flags);
  19888. +- noise_floor = __le32_to_cpu(ev->noise_floor);
  19889. +- rx_clear_count = __le32_to_cpu(ev->rx_clear_count);
  19890. +- cycle_count = __le32_to_cpu(ev->cycle_count);
  19891. ++ err_code = __le32_to_cpu(arg.err_code);
  19892. ++ freq = __le32_to_cpu(arg.freq);
  19893. ++ cmd_flags = __le32_to_cpu(arg.cmd_flags);
  19894. ++ noise_floor = __le32_to_cpu(arg.noise_floor);
  19895. ++ rx_clear_count = __le32_to_cpu(arg.rx_clear_count);
  19896. ++ cycle_count = __le32_to_cpu(arg.cycle_count);
  19897. +
  19898. +- ath10k_dbg(ATH10K_DBG_WMI,
  19899. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  19900. + "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
  19901. + err_code, freq, cmd_flags, noise_floor, rx_clear_count,
  19902. + cycle_count);
  19903. +
  19904. + spin_lock_bh(&ar->data_lock);
  19905. +
  19906. +- if (!ar->scan.in_progress) {
  19907. +- ath10k_warn("chan info event without a scan request?\n");
  19908. ++ switch (ar->scan.state) {
  19909. ++ case ATH10K_SCAN_IDLE:
  19910. ++ case ATH10K_SCAN_STARTING:
  19911. ++ ath10k_warn(ar, "received chan info event without a scan request, ignoring\n");
  19912. + goto exit;
  19913. ++ case ATH10K_SCAN_RUNNING:
  19914. ++ case ATH10K_SCAN_ABORTING:
  19915. ++ break;
  19916. + }
  19917. +
  19918. + idx = freq_to_idx(ar, freq);
  19919. + if (idx >= ARRAY_SIZE(ar->survey)) {
  19920. +- ath10k_warn("chan info: invalid frequency %d (idx %d out of bounds)\n",
  19921. ++ ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n",
  19922. + freq, idx);
  19923. + goto exit;
  19924. + }
  19925. +@@ -1061,191 +1705,579 @@ exit:
  19926. + spin_unlock_bh(&ar->data_lock);
  19927. + }
  19928. +
  19929. +-static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
  19930. ++void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
  19931. + {
  19932. +- ath10k_dbg(ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
  19933. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
  19934. + }
  19935. +
  19936. +-static int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
  19937. ++int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
  19938. + {
  19939. +- ath10k_dbg(ATH10K_DBG_WMI, "wmi event debug mesg len %d\n",
  19940. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug mesg len %d\n",
  19941. + skb->len);
  19942. +
  19943. +- trace_ath10k_wmi_dbglog(skb->data, skb->len);
  19944. ++ trace_ath10k_wmi_dbglog(ar, skb->data, skb->len);
  19945. +
  19946. + return 0;
  19947. + }
  19948. +
  19949. +-static void ath10k_wmi_event_update_stats(struct ath10k *ar,
  19950. +- struct sk_buff *skb)
  19951. ++void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
  19952. ++ struct ath10k_fw_stats_pdev *dst)
  19953. + {
  19954. +- struct wmi_stats_event *ev = (struct wmi_stats_event *)skb->data;
  19955. +-
  19956. +- ath10k_dbg(ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
  19957. +-
  19958. +- ath10k_debug_read_target_stats(ar, ev);
  19959. +-}
  19960. +-
  19961. +-static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar,
  19962. +- struct sk_buff *skb)
  19963. ++ dst->ch_noise_floor = __le32_to_cpu(src->chan_nf);
  19964. ++ dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
  19965. ++ dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
  19966. ++ dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count);
  19967. ++ dst->cycle_count = __le32_to_cpu(src->cycle_count);
  19968. ++ dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
  19969. ++ dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);
  19970. ++}
  19971. ++
  19972. ++void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
  19973. ++ struct ath10k_fw_stats_pdev *dst)
  19974. ++{
  19975. ++ dst->comp_queued = __le32_to_cpu(src->comp_queued);
  19976. ++ dst->comp_delivered = __le32_to_cpu(src->comp_delivered);
  19977. ++ dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued);
  19978. ++ dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued);
  19979. ++ dst->wmm_drop = __le32_to_cpu(src->wmm_drop);
  19980. ++ dst->local_enqued = __le32_to_cpu(src->local_enqued);
  19981. ++ dst->local_freed = __le32_to_cpu(src->local_freed);
  19982. ++ dst->hw_queued = __le32_to_cpu(src->hw_queued);
  19983. ++ dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
  19984. ++ dst->underrun = __le32_to_cpu(src->underrun);
  19985. ++ dst->tx_abort = __le32_to_cpu(src->tx_abort);
  19986. ++ dst->mpdus_requed = __le32_to_cpu(src->mpdus_requed);
  19987. ++ dst->tx_ko = __le32_to_cpu(src->tx_ko);
  19988. ++ dst->data_rc = __le32_to_cpu(src->data_rc);
  19989. ++ dst->self_triggers = __le32_to_cpu(src->self_triggers);
  19990. ++ dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
  19991. ++ dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
  19992. ++ dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
  19993. ++ dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
  19994. ++ dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
  19995. ++ dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
  19996. ++ dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
  19997. ++}
  19998. ++
  19999. ++void ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
  20000. ++ struct ath10k_fw_stats_pdev *dst)
  20001. ++{
  20002. ++ dst->mid_ppdu_route_change = __le32_to_cpu(src->mid_ppdu_route_change);
  20003. ++ dst->status_rcvd = __le32_to_cpu(src->status_rcvd);
  20004. ++ dst->r0_frags = __le32_to_cpu(src->r0_frags);
  20005. ++ dst->r1_frags = __le32_to_cpu(src->r1_frags);
  20006. ++ dst->r2_frags = __le32_to_cpu(src->r2_frags);
  20007. ++ dst->r3_frags = __le32_to_cpu(src->r3_frags);
  20008. ++ dst->htt_msdus = __le32_to_cpu(src->htt_msdus);
  20009. ++ dst->htt_mpdus = __le32_to_cpu(src->htt_mpdus);
  20010. ++ dst->loc_msdus = __le32_to_cpu(src->loc_msdus);
  20011. ++ dst->loc_mpdus = __le32_to_cpu(src->loc_mpdus);
  20012. ++ dst->oversize_amsdu = __le32_to_cpu(src->oversize_amsdu);
  20013. ++ dst->phy_errs = __le32_to_cpu(src->phy_errs);
  20014. ++ dst->phy_err_drop = __le32_to_cpu(src->phy_err_drop);
  20015. ++ dst->mpdu_errs = __le32_to_cpu(src->mpdu_errs);
  20016. ++}
  20017. ++
  20018. ++void ath10k_wmi_pull_pdev_stats_extra(const struct wmi_pdev_stats_extra *src,
  20019. ++ struct ath10k_fw_stats_pdev *dst)
  20020. ++{
  20021. ++ dst->ack_rx_bad = __le32_to_cpu(src->ack_rx_bad);
  20022. ++ dst->rts_bad = __le32_to_cpu(src->rts_bad);
  20023. ++ dst->rts_good = __le32_to_cpu(src->rts_good);
  20024. ++ dst->fcs_bad = __le32_to_cpu(src->fcs_bad);
  20025. ++ dst->no_beacons = __le32_to_cpu(src->no_beacons);
  20026. ++ dst->mib_int_count = __le32_to_cpu(src->mib_int_count);
  20027. ++}
  20028. ++
  20029. ++void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
  20030. ++ struct ath10k_fw_stats_peer *dst)
  20031. ++{
  20032. ++ ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
  20033. ++ dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
  20034. ++ dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
  20035. ++}
  20036. ++
  20037. ++static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar,
  20038. ++ struct sk_buff *skb,
  20039. ++ struct ath10k_fw_stats *stats)
  20040. + {
  20041. +- struct wmi_vdev_start_response_event *ev;
  20042. ++ const struct wmi_stats_event *ev = (void *)skb->data;
  20043. ++ u32 num_pdev_stats, num_vdev_stats, num_peer_stats;
  20044. ++ int i;
  20045. +
  20046. +- ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
  20047. ++ if (!skb_pull(skb, sizeof(*ev)))
  20048. ++ return -EPROTO;
  20049. +
  20050. +- ev = (struct wmi_vdev_start_response_event *)skb->data;
  20051. ++ num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
  20052. ++ num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
  20053. ++ num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
  20054. ++
  20055. ++ for (i = 0; i < num_pdev_stats; i++) {
  20056. ++ const struct wmi_pdev_stats *src;
  20057. ++ struct ath10k_fw_stats_pdev *dst;
  20058. ++
  20059. ++ src = (void *)skb->data;
  20060. ++ if (!skb_pull(skb, sizeof(*src)))
  20061. ++ return -EPROTO;
  20062. +
  20063. +- if (WARN_ON(__le32_to_cpu(ev->status)))
  20064. +- return;
  20065. ++ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
  20066. ++ if (!dst)
  20067. ++ continue;
  20068. +
  20069. +- complete(&ar->vdev_setup_done);
  20070. +-}
  20071. ++ ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
  20072. ++ ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
  20073. ++ ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
  20074. +
  20075. +-static void ath10k_wmi_event_vdev_stopped(struct ath10k *ar,
  20076. +- struct sk_buff *skb)
  20077. +-{
  20078. +- ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
  20079. +- complete(&ar->vdev_setup_done);
  20080. +-}
  20081. ++ list_add_tail(&dst->list, &stats->pdevs);
  20082. ++ }
  20083. +
  20084. +-static void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar,
  20085. +- struct sk_buff *skb)
  20086. +-{
  20087. +- struct wmi_peer_sta_kickout_event *ev;
  20088. +- struct ieee80211_sta *sta;
  20089. ++ /* fw doesn't implement vdev stats */
  20090. +
  20091. +- ev = (struct wmi_peer_sta_kickout_event *)skb->data;
  20092. ++ for (i = 0; i < num_peer_stats; i++) {
  20093. ++ const struct wmi_peer_stats *src;
  20094. ++ struct ath10k_fw_stats_peer *dst;
  20095. +
  20096. +- ath10k_dbg(ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n",
  20097. +- ev->peer_macaddr.addr);
  20098. ++ src = (void *)skb->data;
  20099. ++ if (!skb_pull(skb, sizeof(*src)))
  20100. ++ return -EPROTO;
  20101. +
  20102. +- rcu_read_lock();
  20103. ++ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
  20104. ++ if (!dst)
  20105. ++ continue;
  20106. +
  20107. +- sta = ieee80211_find_sta_by_ifaddr(ar->hw, ev->peer_macaddr.addr, NULL);
  20108. +- if (!sta) {
  20109. +- ath10k_warn("Spurious quick kickout for STA %pM\n",
  20110. +- ev->peer_macaddr.addr);
  20111. +- goto exit;
  20112. ++ ath10k_wmi_pull_peer_stats(src, dst);
  20113. ++ list_add_tail(&dst->list, &stats->peers);
  20114. + }
  20115. +
  20116. +- ieee80211_report_low_ack(sta, 10);
  20117. +-
  20118. +-exit:
  20119. +- rcu_read_unlock();
  20120. ++ return 0;
  20121. + }
  20122. +
  20123. +-/*
  20124. +- * FIXME
  20125. +- *
  20126. +- * We don't report to mac80211 sleep state of connected
  20127. +- * stations. Due to this mac80211 can't fill in TIM IE
  20128. +- * correctly.
  20129. +- *
  20130. +- * I know of no way of getting nullfunc frames that contain
  20131. +- * sleep transition from connected stations - these do not
  20132. +- * seem to be sent from the target to the host. There also
  20133. +- * doesn't seem to be a dedicated event for that. So the
  20134. +- * only way left to do this would be to read tim_bitmap
  20135. +- * during SWBA.
  20136. +- *
  20137. +- * We could probably try using tim_bitmap from SWBA to tell
  20138. +- * mac80211 which stations are asleep and which are not. The
  20139. +- * problem here is calling mac80211 functions so many times
  20140. +- * could take too long and make us miss the time to submit
  20141. +- * the beacon to the target.
  20142. +- *
  20143. +- * So as a workaround we try to extend the TIM IE if there
  20144. +- * is unicast buffered for stations with aid > 7 and fill it
  20145. +- * in ourselves.
  20146. +- */
  20147. +-static void ath10k_wmi_update_tim(struct ath10k *ar,
  20148. +- struct ath10k_vif *arvif,
  20149. +- struct sk_buff *bcn,
  20150. +- struct wmi_bcn_info *bcn_info)
  20151. ++static int ath10k_wmi_10x_op_pull_fw_stats(struct ath10k *ar,
  20152. ++ struct sk_buff *skb,
  20153. ++ struct ath10k_fw_stats *stats)
  20154. + {
  20155. +- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
  20156. +- struct ieee80211_tim_ie *tim;
  20157. +- u8 *ies, *ie;
  20158. +- u8 ie_len, pvm_len;
  20159. ++ const struct wmi_stats_event *ev = (void *)skb->data;
  20160. ++ u32 num_pdev_stats, num_vdev_stats, num_peer_stats;
  20161. ++ int i;
  20162. +
  20163. +- /* if next SWBA has no tim_changed the tim_bitmap is garbage.
  20164. +- * we must copy the bitmap upon change and reuse it later */
  20165. +- if (__le32_to_cpu(bcn_info->tim_info.tim_changed)) {
  20166. +- int i;
  20167. ++ if (!skb_pull(skb, sizeof(*ev)))
  20168. ++ return -EPROTO;
  20169. +
  20170. +- BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) !=
  20171. +- sizeof(bcn_info->tim_info.tim_bitmap));
  20172. ++ num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
  20173. ++ num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
  20174. ++ num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
  20175. ++
  20176. ++ for (i = 0; i < num_pdev_stats; i++) {
  20177. ++ const struct wmi_10x_pdev_stats *src;
  20178. ++ struct ath10k_fw_stats_pdev *dst;
  20179. ++
  20180. ++ src = (void *)skb->data;
  20181. ++ if (!skb_pull(skb, sizeof(*src)))
  20182. ++ return -EPROTO;
  20183. +
  20184. +- for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) {
  20185. +- __le32 t = bcn_info->tim_info.tim_bitmap[i / 4];
  20186. +- u32 v = __le32_to_cpu(t);
  20187. +- arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
  20188. +- }
  20189. ++ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
  20190. ++ if (!dst)
  20191. ++ continue;
  20192. +
  20193. +- /* FW reports either length 0 or 16
  20194. +- * so we calculate this on our own */
  20195. +- arvif->u.ap.tim_len = 0;
  20196. +- for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++)
  20197. +- if (arvif->u.ap.tim_bitmap[i])
  20198. +- arvif->u.ap.tim_len = i;
  20199. ++ ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
  20200. ++ ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
  20201. ++ ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
  20202. ++ ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
  20203. +
  20204. +- arvif->u.ap.tim_len++;
  20205. ++ list_add_tail(&dst->list, &stats->pdevs);
  20206. + }
  20207. +
  20208. +- ies = bcn->data;
  20209. +- ies += ieee80211_hdrlen(hdr->frame_control);
  20210. +- ies += 12; /* fixed parameters */
  20211. ++ /* fw doesn't implement vdev stats */
  20212. +
  20213. +- ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
  20214. +- (u8 *)skb_tail_pointer(bcn) - ies);
  20215. +- if (!ie) {
  20216. +- if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
  20217. +- ath10k_warn("no tim ie found;\n");
  20218. +- return;
  20219. +- }
  20220. ++ for (i = 0; i < num_peer_stats; i++) {
  20221. ++ const struct wmi_10x_peer_stats *src;
  20222. ++ struct ath10k_fw_stats_peer *dst;
  20223. +
  20224. +- tim = (void *)ie + 2;
  20225. +- ie_len = ie[1];
  20226. +- pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */
  20227. ++ src = (void *)skb->data;
  20228. ++ if (!skb_pull(skb, sizeof(*src)))
  20229. ++ return -EPROTO;
  20230. +
  20231. +- if (pvm_len < arvif->u.ap.tim_len) {
  20232. +- int expand_size = sizeof(arvif->u.ap.tim_bitmap) - pvm_len;
  20233. +- int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
  20234. +- void *next_ie = ie + 2 + ie_len;
  20235. ++ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
  20236. ++ if (!dst)
  20237. ++ continue;
  20238. +
  20239. +- if (skb_put(bcn, expand_size)) {
  20240. +- memmove(next_ie + expand_size, next_ie, move_size);
  20241. ++ ath10k_wmi_pull_peer_stats(&src->old, dst);
  20242. +
  20243. +- ie[1] += expand_size;
  20244. +- ie_len += expand_size;
  20245. +- pvm_len += expand_size;
  20246. +- } else {
  20247. +- ath10k_warn("tim expansion failed\n");
  20248. +- }
  20249. +- }
  20250. ++ dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
  20251. +
  20252. +- if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) {
  20253. +- ath10k_warn("tim pvm length is too great (%d)\n", pvm_len);
  20254. +- return;
  20255. ++ list_add_tail(&dst->list, &stats->peers);
  20256. + }
  20257. +
  20258. +- tim->bitmap_ctrl = !!__le32_to_cpu(bcn_info->tim_info.tim_mcast);
  20259. +- memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);
  20260. ++ return 0;
  20261. ++}
  20262. +
  20263. +- if (tim->dtim_count == 0) {
  20264. +- ATH10K_SKB_CB(bcn)->bcn.dtim_zero = true;
  20265. ++static int ath10k_wmi_10_2_op_pull_fw_stats(struct ath10k *ar,
  20266. ++ struct sk_buff *skb,
  20267. ++ struct ath10k_fw_stats *stats)
  20268. ++{
  20269. ++ const struct wmi_10_2_stats_event *ev = (void *)skb->data;
  20270. ++ u32 num_pdev_stats;
  20271. ++ u32 num_pdev_ext_stats;
  20272. ++ u32 num_vdev_stats;
  20273. ++ u32 num_peer_stats;
  20274. ++ int i;
  20275. +
  20276. +- if (__le32_to_cpu(bcn_info->tim_info.tim_mcast) == 1)
  20277. +- ATH10K_SKB_CB(bcn)->bcn.deliver_cab = true;
  20278. +- }
  20279. ++ if (!skb_pull(skb, sizeof(*ev)))
  20280. ++ return -EPROTO;
  20281. +
  20282. +- ath10k_dbg(ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
  20283. +- tim->dtim_count, tim->dtim_period,
  20284. +- tim->bitmap_ctrl, pvm_len);
  20285. ++ num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
  20286. ++ num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
  20287. ++ num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
  20288. ++ num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
  20289. ++
  20290. ++ for (i = 0; i < num_pdev_stats; i++) {
  20291. ++ const struct wmi_10_2_pdev_stats *src;
  20292. ++ struct ath10k_fw_stats_pdev *dst;
  20293. ++
  20294. ++ src = (void *)skb->data;
  20295. ++ if (!skb_pull(skb, sizeof(*src)))
  20296. ++ return -EPROTO;
  20297. ++
  20298. ++ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
  20299. ++ if (!dst)
  20300. ++ continue;
  20301. ++
  20302. ++ ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
  20303. ++ ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
  20304. ++ ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
  20305. ++ ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
  20306. ++ /* FIXME: expose 10.2 specific values */
  20307. ++
  20308. ++ list_add_tail(&dst->list, &stats->pdevs);
  20309. ++ }
  20310. ++
  20311. ++ for (i = 0; i < num_pdev_ext_stats; i++) {
  20312. ++ const struct wmi_10_2_pdev_ext_stats *src;
  20313. ++
  20314. ++ src = (void *)skb->data;
  20315. ++ if (!skb_pull(skb, sizeof(*src)))
  20316. ++ return -EPROTO;
  20317. ++
  20318. ++ /* FIXME: expose values to userspace
  20319. ++ *
  20320. ++ * Note: Even though this loop seems to do nothing it is
  20321. ++ * required to parse following sub-structures properly.
  20322. ++ */
  20323. ++ }
  20324. ++
  20325. ++ /* fw doesn't implement vdev stats */
  20326. ++
  20327. ++ for (i = 0; i < num_peer_stats; i++) {
  20328. ++ const struct wmi_10_2_peer_stats *src;
  20329. ++ struct ath10k_fw_stats_peer *dst;
  20330. ++
  20331. ++ src = (void *)skb->data;
  20332. ++ if (!skb_pull(skb, sizeof(*src)))
  20333. ++ return -EPROTO;
  20334. ++
  20335. ++ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
  20336. ++ if (!dst)
  20337. ++ continue;
  20338. ++
  20339. ++ ath10k_wmi_pull_peer_stats(&src->old, dst);
  20340. ++
  20341. ++ dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
  20342. ++ /* FIXME: expose 10.2 specific values */
  20343. ++
  20344. ++ list_add_tail(&dst->list, &stats->peers);
  20345. ++ }
  20346. ++
  20347. ++ return 0;
  20348. ++}
  20349. ++
  20350. ++static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar,
  20351. ++ struct sk_buff *skb,
  20352. ++ struct ath10k_fw_stats *stats)
  20353. ++{
  20354. ++ const struct wmi_10_2_stats_event *ev = (void *)skb->data;
  20355. ++ u32 num_pdev_stats;
  20356. ++ u32 num_pdev_ext_stats;
  20357. ++ u32 num_vdev_stats;
  20358. ++ u32 num_peer_stats;
  20359. ++ int i;
  20360. ++
  20361. ++ if (!skb_pull(skb, sizeof(*ev)))
  20362. ++ return -EPROTO;
  20363. ++
  20364. ++ num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
  20365. ++ num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
  20366. ++ num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
  20367. ++ num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
  20368. ++
  20369. ++ for (i = 0; i < num_pdev_stats; i++) {
  20370. ++ const struct wmi_10_2_pdev_stats *src;
  20371. ++ struct ath10k_fw_stats_pdev *dst;
  20372. ++
  20373. ++ src = (void *)skb->data;
  20374. ++ if (!skb_pull(skb, sizeof(*src)))
  20375. ++ return -EPROTO;
  20376. ++
  20377. ++ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
  20378. ++ if (!dst)
  20379. ++ continue;
  20380. ++
  20381. ++ ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
  20382. ++ ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
  20383. ++ ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
  20384. ++ ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
  20385. ++ /* FIXME: expose 10.2 specific values */
  20386. ++
  20387. ++ list_add_tail(&dst->list, &stats->pdevs);
  20388. ++ }
  20389. ++
  20390. ++ for (i = 0; i < num_pdev_ext_stats; i++) {
  20391. ++ const struct wmi_10_2_pdev_ext_stats *src;
  20392. ++
  20393. ++ src = (void *)skb->data;
  20394. ++ if (!skb_pull(skb, sizeof(*src)))
  20395. ++ return -EPROTO;
  20396. ++
  20397. ++ /* FIXME: expose values to userspace
  20398. ++ *
  20399. ++ * Note: Even though this loop seems to do nothing it is
  20400. ++ * required to parse following sub-structures properly.
  20401. ++ */
  20402. ++ }
  20403. ++
  20404. ++ /* fw doesn't implement vdev stats */
  20405. ++
  20406. ++ for (i = 0; i < num_peer_stats; i++) {
  20407. ++ const struct wmi_10_2_4_peer_stats *src;
  20408. ++ struct ath10k_fw_stats_peer *dst;
  20409. ++
  20410. ++ src = (void *)skb->data;
  20411. ++ if (!skb_pull(skb, sizeof(*src)))
  20412. ++ return -EPROTO;
  20413. ++
  20414. ++ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
  20415. ++ if (!dst)
  20416. ++ continue;
  20417. ++
  20418. ++ ath10k_wmi_pull_peer_stats(&src->common.old, dst);
  20419. ++
  20420. ++ dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate);
  20421. ++ /* FIXME: expose 10.2 specific values */
  20422. ++
  20423. ++ list_add_tail(&dst->list, &stats->peers);
  20424. ++ }
  20425. ++
  20426. ++ return 0;
  20427. ++}
  20428. ++
  20429. ++void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb)
  20430. ++{
  20431. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
  20432. ++ ath10k_debug_fw_stats_process(ar, skb);
  20433. ++}
  20434. ++
  20435. ++static int
  20436. ++ath10k_wmi_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
  20437. ++ struct wmi_vdev_start_ev_arg *arg)
  20438. ++{
  20439. ++ struct wmi_vdev_start_response_event *ev = (void *)skb->data;
  20440. ++
  20441. ++ if (skb->len < sizeof(*ev))
  20442. ++ return -EPROTO;
  20443. ++
  20444. ++ skb_pull(skb, sizeof(*ev));
  20445. ++ arg->vdev_id = ev->vdev_id;
  20446. ++ arg->req_id = ev->req_id;
  20447. ++ arg->resp_type = ev->resp_type;
  20448. ++ arg->status = ev->status;
  20449. ++
  20450. ++ return 0;
  20451. ++}
  20452. ++
  20453. ++void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb)
  20454. ++{
  20455. ++ struct wmi_vdev_start_ev_arg arg = {};
  20456. ++ int ret;
  20457. ++
  20458. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
  20459. ++
  20460. ++ ret = ath10k_wmi_pull_vdev_start(ar, skb, &arg);
  20461. ++ if (ret) {
  20462. ++ ath10k_warn(ar, "failed to parse vdev start event: %d\n", ret);
  20463. ++ return;
  20464. ++ }
  20465. ++
  20466. ++ if (WARN_ON(__le32_to_cpu(arg.status)))
  20467. ++ return;
  20468. ++
  20469. ++ complete(&ar->vdev_setup_done);
  20470. ++}
  20471. ++
  20472. ++void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb)
  20473. ++{
  20474. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
  20475. ++ complete(&ar->vdev_setup_done);
  20476. ++}
  20477. ++
  20478. ++static int
  20479. ++ath10k_wmi_op_pull_peer_kick_ev(struct ath10k *ar, struct sk_buff *skb,
  20480. ++ struct wmi_peer_kick_ev_arg *arg)
  20481. ++{
  20482. ++ struct wmi_peer_sta_kickout_event *ev = (void *)skb->data;
  20483. ++
  20484. ++ if (skb->len < sizeof(*ev))
  20485. ++ return -EPROTO;
  20486. ++
  20487. ++ skb_pull(skb, sizeof(*ev));
  20488. ++ arg->mac_addr = ev->peer_macaddr.addr;
  20489. ++
  20490. ++ return 0;
  20491. ++}
  20492. ++
  20493. ++void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb)
  20494. ++{
  20495. ++ struct wmi_peer_kick_ev_arg arg = {};
  20496. ++ struct ieee80211_sta *sta;
  20497. ++ int ret;
  20498. ++
  20499. ++ ret = ath10k_wmi_pull_peer_kick(ar, skb, &arg);
  20500. ++ if (ret) {
  20501. ++ ath10k_warn(ar, "failed to parse peer kickout event: %d\n",
  20502. ++ ret);
  20503. ++ return;
  20504. ++ }
  20505. ++
  20506. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n",
  20507. ++ arg.mac_addr);
  20508. ++
  20509. ++ rcu_read_lock();
  20510. ++
  20511. ++ sta = ieee80211_find_sta_by_ifaddr(ar->hw, arg.mac_addr, NULL);
  20512. ++ if (!sta) {
  20513. ++ ath10k_warn(ar, "Spurious quick kickout for STA %pM\n",
  20514. ++ arg.mac_addr);
  20515. ++ goto exit;
  20516. ++ }
  20517. ++
  20518. ++ ieee80211_report_low_ack(sta, 10);
  20519. ++
  20520. ++exit:
  20521. ++ rcu_read_unlock();
  20522. ++}
  20523. ++
  20524. ++/*
  20525. ++ * FIXME
  20526. ++ *
  20527. ++ * We don't report to mac80211 sleep state of connected
  20528. ++ * stations. Due to this mac80211 can't fill in TIM IE
  20529. ++ * correctly.
  20530. ++ *
  20531. ++ * I know of no way of getting nullfunc frames that contain
  20532. ++ * sleep transition from connected stations - these do not
  20533. ++ * seem to be sent from the target to the host. There also
  20534. ++ * doesn't seem to be a dedicated event for that. So the
  20535. ++ * only way left to do this would be to read tim_bitmap
  20536. ++ * during SWBA.
  20537. ++ *
  20538. ++ * We could probably try using tim_bitmap from SWBA to tell
  20539. ++ * mac80211 which stations are asleep and which are not. The
  20540. ++ * problem here is calling mac80211 functions so many times
  20541. ++ * could take too long and make us miss the time to submit
  20542. ++ * the beacon to the target.
  20543. ++ *
  20544. ++ * So as a workaround we try to extend the TIM IE if there
  20545. ++ * is unicast buffered for stations with aid > 7 and fill it
  20546. ++ * in ourselves.
  20547. ++ */
  20548. ++static void ath10k_wmi_update_tim(struct ath10k *ar,
  20549. ++ struct ath10k_vif *arvif,
  20550. ++ struct sk_buff *bcn,
  20551. ++ const struct wmi_tim_info *tim_info)
  20552. ++{
  20553. ++ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
  20554. ++ struct ieee80211_tim_ie *tim;
  20555. ++ u8 *ies, *ie;
  20556. ++ u8 ie_len, pvm_len;
  20557. ++ __le32 t;
  20558. ++ u32 v;
  20559. ++
  20560. ++ /* if next SWBA has no tim_changed the tim_bitmap is garbage.
  20561. ++ * we must copy the bitmap upon change and reuse it later */
  20562. ++ if (__le32_to_cpu(tim_info->tim_changed)) {
  20563. ++ int i;
  20564. ++
  20565. ++ BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) !=
  20566. ++ sizeof(tim_info->tim_bitmap));
  20567. ++
  20568. ++ for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) {
  20569. ++ t = tim_info->tim_bitmap[i / 4];
  20570. ++ v = __le32_to_cpu(t);
  20571. ++ arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
  20572. ++ }
  20573. ++
  20574. ++ /* FW reports either length 0 or 16
  20575. ++ * so we calculate this on our own */
  20576. ++ arvif->u.ap.tim_len = 0;
  20577. ++ for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++)
  20578. ++ if (arvif->u.ap.tim_bitmap[i])
  20579. ++ arvif->u.ap.tim_len = i;
  20580. ++
  20581. ++ arvif->u.ap.tim_len++;
  20582. ++ }
  20583. ++
  20584. ++ ies = bcn->data;
  20585. ++ ies += ieee80211_hdrlen(hdr->frame_control);
  20586. ++ ies += 12; /* fixed parameters */
  20587. ++
  20588. ++ ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
  20589. ++ (u8 *)skb_tail_pointer(bcn) - ies);
  20590. ++ if (!ie) {
  20591. ++ if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
  20592. ++ ath10k_warn(ar, "no tim ie found;\n");
  20593. ++ return;
  20594. ++ }
  20595. ++
  20596. ++ tim = (void *)ie + 2;
  20597. ++ ie_len = ie[1];
  20598. ++ pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */
  20599. ++
  20600. ++ if (pvm_len < arvif->u.ap.tim_len) {
  20601. ++ int expand_size = sizeof(arvif->u.ap.tim_bitmap) - pvm_len;
  20602. ++ int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
  20603. ++ void *next_ie = ie + 2 + ie_len;
  20604. ++
  20605. ++ if (skb_put(bcn, expand_size)) {
  20606. ++ memmove(next_ie + expand_size, next_ie, move_size);
  20607. ++
  20608. ++ ie[1] += expand_size;
  20609. ++ ie_len += expand_size;
  20610. ++ pvm_len += expand_size;
  20611. ++ } else {
  20612. ++ ath10k_warn(ar, "tim expansion failed\n");
  20613. ++ }
  20614. ++ }
  20615. ++
  20616. ++ if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) {
  20617. ++ ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len);
  20618. ++ return;
  20619. ++ }
  20620. ++
  20621. ++ tim->bitmap_ctrl = !!__le32_to_cpu(tim_info->tim_mcast);
  20622. ++ memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);
  20623. ++
  20624. ++ if (tim->dtim_count == 0) {
  20625. ++ ATH10K_SKB_CB(bcn)->bcn.dtim_zero = true;
  20626. ++
  20627. ++ if (__le32_to_cpu(tim_info->tim_mcast) == 1)
  20628. ++ ATH10K_SKB_CB(bcn)->bcn.deliver_cab = true;
  20629. ++ }
  20630. ++
  20631. ++ ath10k_dbg(ar, ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
  20632. ++ tim->dtim_count, tim->dtim_period,
  20633. ++ tim->bitmap_ctrl, pvm_len);
  20634. + }
  20635. +
  20636. + static void ath10k_p2p_fill_noa_ie(u8 *data, u32 len,
  20637. +- struct wmi_p2p_noa_info *noa)
  20638. ++ const struct wmi_p2p_noa_info *noa)
  20639. + {
  20640. + struct ieee80211_p2p_noa_attr *noa_attr;
  20641. + u8 ctwindow_oppps = noa->ctwindow_oppps;
  20642. +@@ -1287,14 +2319,13 @@ static void ath10k_p2p_fill_noa_ie(u8 *d
  20643. + *noa_attr_len = __cpu_to_le16(attr_len);
  20644. + }
  20645. +
  20646. +-static u32 ath10k_p2p_calc_noa_ie_len(struct wmi_p2p_noa_info *noa)
  20647. ++static u32 ath10k_p2p_calc_noa_ie_len(const struct wmi_p2p_noa_info *noa)
  20648. + {
  20649. + u32 len = 0;
  20650. + u8 noa_descriptors = noa->num_descriptors;
  20651. + u8 opp_ps_info = noa->ctwindow_oppps;
  20652. + bool opps_enabled = !!(opp_ps_info & WMI_P2P_OPPPS_ENABLE_BIT);
  20653. +
  20654. +-
  20655. + if (!noa_descriptors && !opps_enabled)
  20656. + return len;
  20657. +
  20658. +@@ -1308,16 +2339,15 @@ static u32 ath10k_p2p_calc_noa_ie_len(st
  20659. +
  20660. + static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
  20661. + struct sk_buff *bcn,
  20662. +- struct wmi_bcn_info *bcn_info)
  20663. ++ const struct wmi_p2p_noa_info *noa)
  20664. + {
  20665. +- struct wmi_p2p_noa_info *noa = &bcn_info->p2p_noa_info;
  20666. + u8 *new_data, *old_data = arvif->u.ap.noa_data;
  20667. + u32 new_len;
  20668. +
  20669. + if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
  20670. + return;
  20671. +
  20672. +- ath10k_dbg(ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
  20673. ++ ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
  20674. + if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) {
  20675. + new_len = ath10k_p2p_calc_noa_ie_len(noa);
  20676. + if (!new_len)
  20677. +@@ -1351,22 +2381,59 @@ cleanup:
  20678. + kfree(old_data);
  20679. + }
  20680. +
  20681. ++static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
  20682. ++ struct wmi_swba_ev_arg *arg)
  20683. ++{
  20684. ++ struct wmi_host_swba_event *ev = (void *)skb->data;
  20685. ++ u32 map;
  20686. ++ size_t i;
  20687. ++
  20688. ++ if (skb->len < sizeof(*ev))
  20689. ++ return -EPROTO;
  20690. ++
  20691. ++ skb_pull(skb, sizeof(*ev));
  20692. ++ arg->vdev_map = ev->vdev_map;
  20693. ++
  20694. ++ for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
  20695. ++ if (!(map & BIT(0)))
  20696. ++ continue;
  20697. ++
  20698. ++ /* If this happens there were some changes in firmware and
  20699. ++ * ath10k should update the max size of tim_info array.
  20700. ++ */
  20701. ++ if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
  20702. ++ break;
  20703. ++
  20704. ++ arg->tim_info[i] = &ev->bcn_info[i].tim_info;
  20705. ++ arg->noa_info[i] = &ev->bcn_info[i].p2p_noa_info;
  20706. ++ i++;
  20707. ++ }
  20708. ++
  20709. ++ return 0;
  20710. ++}
  20711. +
  20712. +-static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
  20713. ++void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
  20714. + {
  20715. +- struct wmi_host_swba_event *ev;
  20716. ++ struct wmi_swba_ev_arg arg = {};
  20717. + u32 map;
  20718. + int i = -1;
  20719. +- struct wmi_bcn_info *bcn_info;
  20720. ++ const struct wmi_tim_info *tim_info;
  20721. ++ const struct wmi_p2p_noa_info *noa_info;
  20722. + struct ath10k_vif *arvif;
  20723. + struct sk_buff *bcn;
  20724. ++ dma_addr_t paddr;
  20725. + int ret, vdev_id = 0;
  20726. +
  20727. +- ev = (struct wmi_host_swba_event *)skb->data;
  20728. +- map = __le32_to_cpu(ev->vdev_map);
  20729. ++ ret = ath10k_wmi_pull_swba(ar, skb, &arg);
  20730. ++ if (ret) {
  20731. ++ ath10k_warn(ar, "failed to parse swba event: %d\n", ret);
  20732. ++ return;
  20733. ++ }
  20734. ++
  20735. ++ map = __le32_to_cpu(arg.vdev_map);
  20736. +
  20737. +- ath10k_dbg(ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
  20738. +- ev->vdev_map);
  20739. ++ ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
  20740. ++ map);
  20741. +
  20742. + for (; map; map >>= 1, vdev_id++) {
  20743. + if (!(map & 0x1))
  20744. +@@ -1375,27 +2442,29 @@ static void ath10k_wmi_event_host_swba(s
  20745. + i++;
  20746. +
  20747. + if (i >= WMI_MAX_AP_VDEV) {
  20748. +- ath10k_warn("swba has corrupted vdev map\n");
  20749. ++ ath10k_warn(ar, "swba has corrupted vdev map\n");
  20750. + break;
  20751. + }
  20752. +
  20753. +- bcn_info = &ev->bcn_info[i];
  20754. ++ tim_info = arg.tim_info[i];
  20755. ++ noa_info = arg.noa_info[i];
  20756. +
  20757. +- ath10k_dbg(ATH10K_DBG_MGMT,
  20758. ++ ath10k_dbg(ar, ATH10K_DBG_MGMT,
  20759. + "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",
  20760. + i,
  20761. +- __le32_to_cpu(bcn_info->tim_info.tim_len),
  20762. +- __le32_to_cpu(bcn_info->tim_info.tim_mcast),
  20763. +- __le32_to_cpu(bcn_info->tim_info.tim_changed),
  20764. +- __le32_to_cpu(bcn_info->tim_info.tim_num_ps_pending),
  20765. +- __le32_to_cpu(bcn_info->tim_info.tim_bitmap[3]),
  20766. +- __le32_to_cpu(bcn_info->tim_info.tim_bitmap[2]),
  20767. +- __le32_to_cpu(bcn_info->tim_info.tim_bitmap[1]),
  20768. +- __le32_to_cpu(bcn_info->tim_info.tim_bitmap[0]));
  20769. ++ __le32_to_cpu(tim_info->tim_len),
  20770. ++ __le32_to_cpu(tim_info->tim_mcast),
  20771. ++ __le32_to_cpu(tim_info->tim_changed),
  20772. ++ __le32_to_cpu(tim_info->tim_num_ps_pending),
  20773. ++ __le32_to_cpu(tim_info->tim_bitmap[3]),
  20774. ++ __le32_to_cpu(tim_info->tim_bitmap[2]),
  20775. ++ __le32_to_cpu(tim_info->tim_bitmap[1]),
  20776. ++ __le32_to_cpu(tim_info->tim_bitmap[0]));
  20777. +
  20778. + arvif = ath10k_get_arvif(ar, vdev_id);
  20779. + if (arvif == NULL) {
  20780. +- ath10k_warn("no vif for vdev_id %d found\n", vdev_id);
  20781. ++ ath10k_warn(ar, "no vif for vdev_id %d found\n",
  20782. ++ vdev_id);
  20783. + continue;
  20784. + }
  20785. +
  20786. +@@ -1412,57 +2481,77 @@ static void ath10k_wmi_event_host_swba(s
  20787. +
  20788. + bcn = ieee80211_beacon_get(ar->hw, arvif->vif);
  20789. + if (!bcn) {
  20790. +- ath10k_warn("could not get mac80211 beacon\n");
  20791. ++ ath10k_warn(ar, "could not get mac80211 beacon\n");
  20792. + continue;
  20793. + }
  20794. +
  20795. +- ath10k_tx_h_seq_no(bcn);
  20796. +- ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info);
  20797. +- ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info);
  20798. ++ ath10k_tx_h_seq_no(arvif->vif, bcn);
  20799. ++ ath10k_wmi_update_tim(ar, arvif, bcn, tim_info);
  20800. ++ ath10k_wmi_update_noa(ar, arvif, bcn, noa_info);
  20801. +
  20802. + spin_lock_bh(&ar->data_lock);
  20803. +
  20804. + if (arvif->beacon) {
  20805. +- if (!arvif->beacon_sent)
  20806. +- ath10k_warn("SWBA overrun on vdev %d\n",
  20807. ++ switch (arvif->beacon_state) {
  20808. ++ case ATH10K_BEACON_SENT:
  20809. ++ break;
  20810. ++ case ATH10K_BEACON_SCHEDULED:
  20811. ++ ath10k_warn(ar, "SWBA overrun on vdev %d, skipped old beacon\n",
  20812. + arvif->vdev_id);
  20813. ++ break;
  20814. ++ case ATH10K_BEACON_SENDING:
  20815. ++ ath10k_warn(ar, "SWBA overrun on vdev %d, skipped new beacon\n",
  20816. ++ arvif->vdev_id);
  20817. ++ dev_kfree_skb(bcn);
  20818. ++ goto skip;
  20819. ++ }
  20820. +
  20821. +- dma_unmap_single(arvif->ar->dev,
  20822. +- ATH10K_SKB_CB(arvif->beacon)->paddr,
  20823. +- arvif->beacon->len, DMA_TO_DEVICE);
  20824. +- dev_kfree_skb_any(arvif->beacon);
  20825. +- arvif->beacon = NULL;
  20826. ++ ath10k_mac_vif_beacon_free(arvif);
  20827. + }
  20828. +
  20829. +- ATH10K_SKB_CB(bcn)->paddr = dma_map_single(arvif->ar->dev,
  20830. +- bcn->data, bcn->len,
  20831. +- DMA_TO_DEVICE);
  20832. +- ret = dma_mapping_error(arvif->ar->dev,
  20833. +- ATH10K_SKB_CB(bcn)->paddr);
  20834. +- if (ret) {
  20835. +- ath10k_warn("failed to map beacon: %d\n", ret);
  20836. +- dev_kfree_skb_any(bcn);
  20837. +- goto skip;
  20838. ++ if (!arvif->beacon_buf) {
  20839. ++ paddr = dma_map_single(arvif->ar->dev, bcn->data,
  20840. ++ bcn->len, DMA_TO_DEVICE);
  20841. ++ ret = dma_mapping_error(arvif->ar->dev, paddr);
  20842. ++ if (ret) {
  20843. ++ ath10k_warn(ar, "failed to map beacon: %d\n",
  20844. ++ ret);
  20845. ++ dev_kfree_skb_any(bcn);
  20846. ++ goto skip;
  20847. ++ }
  20848. ++
  20849. ++ ATH10K_SKB_CB(bcn)->paddr = paddr;
  20850. ++ } else {
  20851. ++ if (bcn->len > IEEE80211_MAX_FRAME_LEN) {
  20852. ++ ath10k_warn(ar, "trimming beacon %d -> %d bytes!\n",
  20853. ++ bcn->len, IEEE80211_MAX_FRAME_LEN);
  20854. ++ skb_trim(bcn, IEEE80211_MAX_FRAME_LEN);
  20855. ++ }
  20856. ++ memcpy(arvif->beacon_buf, bcn->data, bcn->len);
  20857. ++ ATH10K_SKB_CB(bcn)->paddr = arvif->beacon_paddr;
  20858. + }
  20859. +
  20860. + arvif->beacon = bcn;
  20861. +- arvif->beacon_sent = false;
  20862. ++ arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
  20863. ++
  20864. ++ trace_ath10k_tx_hdr(ar, bcn->data, bcn->len);
  20865. ++ trace_ath10k_tx_payload(ar, bcn->data, bcn->len);
  20866. +
  20867. +- ath10k_wmi_tx_beacon_nowait(arvif);
  20868. + skip:
  20869. + spin_unlock_bh(&ar->data_lock);
  20870. + }
  20871. ++
  20872. ++ ath10k_wmi_tx_beacons_nowait(ar);
  20873. + }
  20874. +
  20875. +-static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar,
  20876. +- struct sk_buff *skb)
  20877. ++void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb)
  20878. + {
  20879. +- ath10k_dbg(ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
  20880. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
  20881. + }
  20882. +
  20883. + static void ath10k_dfs_radar_report(struct ath10k *ar,
  20884. +- struct wmi_single_phyerr_rx_event *event,
  20885. +- struct phyerr_radar_report *rr,
  20886. ++ const struct wmi_phyerr *phyerr,
  20887. ++ const struct phyerr_radar_report *rr,
  20888. + u64 tsf)
  20889. + {
  20890. + u32 reg0, reg1, tsf32l;
  20891. +@@ -1473,20 +2562,20 @@ static void ath10k_dfs_radar_report(stru
  20892. + reg0 = __le32_to_cpu(rr->reg0);
  20893. + reg1 = __le32_to_cpu(rr->reg1);
  20894. +
  20895. +- ath10k_dbg(ATH10K_DBG_REGULATORY,
  20896. ++ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
  20897. + "wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n",
  20898. + MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP),
  20899. + MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH),
  20900. + MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN),
  20901. + MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF));
  20902. +- ath10k_dbg(ATH10K_DBG_REGULATORY,
  20903. ++ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
  20904. + "wmi phyerr radar report pulse_delta_pean %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n",
  20905. + MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK),
  20906. + MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX),
  20907. + MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID),
  20908. + MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN),
  20909. + MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK));
  20910. +- ath10k_dbg(ATH10K_DBG_REGULATORY,
  20911. ++ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
  20912. + "wmi phyerr radar report pulse_tsf_offset 0x%X pulse_dur: %d\n",
  20913. + MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET),
  20914. + MS(reg1, RADAR_REPORT_REG1_PULSE_DUR));
  20915. +@@ -1495,12 +2584,12 @@ static void ath10k_dfs_radar_report(stru
  20916. + return;
  20917. +
  20918. + /* report event to DFS pattern detector */
  20919. +- tsf32l = __le32_to_cpu(event->hdr.tsf_timestamp);
  20920. ++ tsf32l = __le32_to_cpu(phyerr->tsf_timestamp);
  20921. + tsf64 = tsf & (~0xFFFFFFFFULL);
  20922. + tsf64 |= tsf32l;
  20923. +
  20924. + width = MS(reg1, RADAR_REPORT_REG1_PULSE_DUR);
  20925. +- rssi = event->hdr.rssi_combined;
  20926. ++ rssi = phyerr->rssi_combined;
  20927. +
  20928. + /* hardware store this as 8 bit signed value,
  20929. + * set to zero if negative number
  20930. +@@ -1513,25 +2602,25 @@ static void ath10k_dfs_radar_report(stru
  20931. + pe.width = width;
  20932. + pe.rssi = rssi;
  20933. +
  20934. +- ath10k_dbg(ATH10K_DBG_REGULATORY,
  20935. ++ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
  20936. + "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n",
  20937. + pe.freq, pe.width, pe.rssi, pe.ts);
  20938. +
  20939. + ATH10K_DFS_STAT_INC(ar, pulses_detected);
  20940. +
  20941. + if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe)) {
  20942. +- ath10k_dbg(ATH10K_DBG_REGULATORY,
  20943. ++ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
  20944. + "dfs no pulse pattern detected, yet\n");
  20945. + return;
  20946. + }
  20947. +
  20948. +- ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs radar detected\n");
  20949. ++ ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n");
  20950. + ATH10K_DFS_STAT_INC(ar, radar_detected);
  20951. +
  20952. + /* Control radar events reporting in debugfs file
  20953. + dfs_block_radar_events */
  20954. + if (ar->dfs_block_radar_events) {
  20955. +- ath10k_info("DFS Radar detected, but ignored as requested\n");
  20956. ++ ath10k_info(ar, "DFS Radar detected, but ignored as requested\n");
  20957. + return;
  20958. + }
  20959. +
  20960. +@@ -1539,8 +2628,8 @@ static void ath10k_dfs_radar_report(stru
  20961. + }
  20962. +
  20963. + static int ath10k_dfs_fft_report(struct ath10k *ar,
  20964. +- struct wmi_single_phyerr_rx_event *event,
  20965. +- struct phyerr_fft_report *fftr,
  20966. ++ const struct wmi_phyerr *phyerr,
  20967. ++ const struct phyerr_fft_report *fftr,
  20968. + u64 tsf)
  20969. + {
  20970. + u32 reg0, reg1;
  20971. +@@ -1548,15 +2637,15 @@ static int ath10k_dfs_fft_report(struct
  20972. +
  20973. + reg0 = __le32_to_cpu(fftr->reg0);
  20974. + reg1 = __le32_to_cpu(fftr->reg1);
  20975. +- rssi = event->hdr.rssi_combined;
  20976. ++ rssi = phyerr->rssi_combined;
  20977. +
  20978. +- ath10k_dbg(ATH10K_DBG_REGULATORY,
  20979. ++ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
  20980. + "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n",
  20981. + MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB),
  20982. + MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB),
  20983. + MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX),
  20984. + MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX));
  20985. +- ath10k_dbg(ATH10K_DBG_REGULATORY,
  20986. ++ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
  20987. + "wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n",
  20988. + MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB),
  20989. + MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB),
  20990. +@@ -1568,7 +2657,7 @@ static int ath10k_dfs_fft_report(struct
  20991. + /* false event detection */
  20992. + if (rssi == DFS_RSSI_POSSIBLY_FALSE &&
  20993. + peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) {
  20994. +- ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs false pulse detected\n");
  20995. ++ ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs false pulse detected\n");
  20996. + ATH10K_DFS_STAT_INC(ar, pulses_discarded);
  20997. + return -EINVAL;
  20998. + }
  20999. +@@ -1576,21 +2665,21 @@ static int ath10k_dfs_fft_report(struct
  21000. + return 0;
  21001. + }
  21002. +
  21003. +-static void ath10k_wmi_event_dfs(struct ath10k *ar,
  21004. +- struct wmi_single_phyerr_rx_event *event,
  21005. +- u64 tsf)
  21006. ++void ath10k_wmi_event_dfs(struct ath10k *ar,
  21007. ++ const struct wmi_phyerr *phyerr,
  21008. ++ u64 tsf)
  21009. + {
  21010. + int buf_len, tlv_len, res, i = 0;
  21011. +- struct phyerr_tlv *tlv;
  21012. +- struct phyerr_radar_report *rr;
  21013. +- struct phyerr_fft_report *fftr;
  21014. +- u8 *tlv_buf;
  21015. ++ const struct phyerr_tlv *tlv;
  21016. ++ const struct phyerr_radar_report *rr;
  21017. ++ const struct phyerr_fft_report *fftr;
  21018. ++ const u8 *tlv_buf;
  21019. +
  21020. +- buf_len = __le32_to_cpu(event->hdr.buf_len);
  21021. +- ath10k_dbg(ATH10K_DBG_REGULATORY,
  21022. ++ buf_len = __le32_to_cpu(phyerr->buf_len);
  21023. ++ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
  21024. + "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n",
  21025. +- event->hdr.phy_err_code, event->hdr.rssi_combined,
  21026. +- __le32_to_cpu(event->hdr.tsf_timestamp), tsf, buf_len);
  21027. ++ phyerr->phy_err_code, phyerr->rssi_combined,
  21028. ++ __le32_to_cpu(phyerr->tsf_timestamp), tsf, buf_len);
  21029. +
  21030. + /* Skip event if DFS disabled */
  21031. + if (!config_enabled(CPTCFG_ATH10K_DFS_CERTIFIED))
  21032. +@@ -1600,36 +2689,38 @@ static void ath10k_wmi_event_dfs(struct
  21033. +
  21034. + while (i < buf_len) {
  21035. + if (i + sizeof(*tlv) > buf_len) {
  21036. +- ath10k_warn("too short buf for tlv header (%d)\n", i);
  21037. ++ ath10k_warn(ar, "too short buf for tlv header (%d)\n",
  21038. ++ i);
  21039. + return;
  21040. + }
  21041. +
  21042. +- tlv = (struct phyerr_tlv *)&event->bufp[i];
  21043. ++ tlv = (struct phyerr_tlv *)&phyerr->buf[i];
  21044. + tlv_len = __le16_to_cpu(tlv->len);
  21045. +- tlv_buf = &event->bufp[i + sizeof(*tlv)];
  21046. +- ath10k_dbg(ATH10K_DBG_REGULATORY,
  21047. ++ tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
  21048. ++ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
  21049. + "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n",
  21050. + tlv_len, tlv->tag, tlv->sig);
  21051. +
  21052. + switch (tlv->tag) {
  21053. + case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY:
  21054. + if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) {
  21055. +- ath10k_warn("too short radar pulse summary (%d)\n",
  21056. ++ ath10k_warn(ar, "too short radar pulse summary (%d)\n",
  21057. + i);
  21058. + return;
  21059. + }
  21060. +
  21061. + rr = (struct phyerr_radar_report *)tlv_buf;
  21062. +- ath10k_dfs_radar_report(ar, event, rr, tsf);
  21063. ++ ath10k_dfs_radar_report(ar, phyerr, rr, tsf);
  21064. + break;
  21065. + case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
  21066. + if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) {
  21067. +- ath10k_warn("too short fft report (%d)\n", i);
  21068. ++ ath10k_warn(ar, "too short fft report (%d)\n",
  21069. ++ i);
  21070. + return;
  21071. + }
  21072. +
  21073. + fftr = (struct phyerr_fft_report *)tlv_buf;
  21074. +- res = ath10k_dfs_fft_report(ar, event, fftr, tsf);
  21075. ++ res = ath10k_dfs_fft_report(ar, phyerr, fftr, tsf);
  21076. + if (res)
  21077. + return;
  21078. + break;
  21079. +@@ -1639,58 +2730,122 @@ static void ath10k_wmi_event_dfs(struct
  21080. + }
  21081. + }
  21082. +
  21083. +-static void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
  21084. +- struct wmi_single_phyerr_rx_event *event,
  21085. +- u64 tsf)
  21086. +-{
  21087. +- ath10k_dbg(ATH10K_DBG_WMI, "wmi event spectral scan\n");
  21088. +-}
  21089. +-
  21090. +-static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
  21091. ++void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
  21092. ++ const struct wmi_phyerr *phyerr,
  21093. ++ u64 tsf)
  21094. + {
  21095. +- struct wmi_comb_phyerr_rx_event *comb_event;
  21096. +- struct wmi_single_phyerr_rx_event *event;
  21097. +- u32 count, i, buf_len, phy_err_code;
  21098. +- u64 tsf;
  21099. +- int left_len = skb->len;
  21100. +-
  21101. +- ATH10K_DFS_STAT_INC(ar, phy_errors);
  21102. +-
  21103. +- /* Check if combined event available */
  21104. +- if (left_len < sizeof(*comb_event)) {
  21105. +- ath10k_warn("wmi phyerr combined event wrong len\n");
  21106. +- return;
  21107. +- }
  21108. +-
  21109. +- left_len -= sizeof(*comb_event);
  21110. ++ int buf_len, tlv_len, res, i = 0;
  21111. ++ struct phyerr_tlv *tlv;
  21112. ++ const void *tlv_buf;
  21113. ++ const struct phyerr_fft_report *fftr;
  21114. ++ size_t fftr_len;
  21115. +
  21116. +- /* Check number of included events */
  21117. +- comb_event = (struct wmi_comb_phyerr_rx_event *)skb->data;
  21118. +- count = __le32_to_cpu(comb_event->hdr.num_phyerr_events);
  21119. ++ buf_len = __le32_to_cpu(phyerr->buf_len);
  21120. +
  21121. +- tsf = __le32_to_cpu(comb_event->hdr.tsf_u32);
  21122. +- tsf <<= 32;
  21123. +- tsf |= __le32_to_cpu(comb_event->hdr.tsf_l32);
  21124. ++ while (i < buf_len) {
  21125. ++ if (i + sizeof(*tlv) > buf_len) {
  21126. ++ ath10k_warn(ar, "failed to parse phyerr tlv header at byte %d\n",
  21127. ++ i);
  21128. ++ return;
  21129. ++ }
  21130. +
  21131. +- ath10k_dbg(ATH10K_DBG_WMI,
  21132. +- "wmi event phyerr count %d tsf64 0x%llX\n",
  21133. +- count, tsf);
  21134. ++ tlv = (struct phyerr_tlv *)&phyerr->buf[i];
  21135. ++ tlv_len = __le16_to_cpu(tlv->len);
  21136. ++ tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
  21137. +
  21138. +- event = (struct wmi_single_phyerr_rx_event *)comb_event->bufp;
  21139. +- for (i = 0; i < count; i++) {
  21140. +- /* Check if we can read event header */
  21141. +- if (left_len < sizeof(*event)) {
  21142. +- ath10k_warn("single event (%d) wrong head len\n", i);
  21143. ++ if (i + sizeof(*tlv) + tlv_len > buf_len) {
  21144. ++ ath10k_warn(ar, "failed to parse phyerr tlv payload at byte %d\n",
  21145. ++ i);
  21146. + return;
  21147. + }
  21148. +
  21149. +- left_len -= sizeof(*event);
  21150. +-
  21151. +- buf_len = __le32_to_cpu(event->hdr.buf_len);
  21152. +- phy_err_code = event->hdr.phy_err_code;
  21153. ++ switch (tlv->tag) {
  21154. ++ case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
  21155. ++ if (sizeof(*fftr) > tlv_len) {
  21156. ++ ath10k_warn(ar, "failed to parse fft report at byte %d\n",
  21157. ++ i);
  21158. ++ return;
  21159. ++ }
  21160. ++
  21161. ++ fftr_len = tlv_len - sizeof(*fftr);
  21162. ++ fftr = tlv_buf;
  21163. ++ res = ath10k_spectral_process_fft(ar, phyerr,
  21164. ++ fftr, fftr_len,
  21165. ++ tsf);
  21166. ++ if (res < 0) {
  21167. ++ ath10k_warn(ar, "failed to process fft report: %d\n",
  21168. ++ res);
  21169. ++ return;
  21170. ++ }
  21171. ++ break;
  21172. ++ }
  21173. ++
  21174. ++ i += sizeof(*tlv) + tlv_len;
  21175. ++ }
  21176. ++}
  21177. ++
  21178. ++static int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar, struct sk_buff *skb,
  21179. ++ struct wmi_phyerr_ev_arg *arg)
  21180. ++{
  21181. ++ struct wmi_phyerr_event *ev = (void *)skb->data;
  21182. ++
  21183. ++ if (skb->len < sizeof(*ev))
  21184. ++ return -EPROTO;
  21185. ++
  21186. ++ arg->num_phyerrs = ev->num_phyerrs;
  21187. ++ arg->tsf_l32 = ev->tsf_l32;
  21188. ++ arg->tsf_u32 = ev->tsf_u32;
  21189. ++ arg->buf_len = __cpu_to_le32(skb->len - sizeof(*ev));
  21190. ++ arg->phyerrs = ev->phyerrs;
  21191. ++
  21192. ++ return 0;
  21193. ++}
  21194. ++
  21195. ++void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
  21196. ++{
  21197. ++ struct wmi_phyerr_ev_arg arg = {};
  21198. ++ const struct wmi_phyerr *phyerr;
  21199. ++ u32 count, i, buf_len, phy_err_code;
  21200. ++ u64 tsf;
  21201. ++ int left_len, ret;
  21202. ++
  21203. ++ ATH10K_DFS_STAT_INC(ar, phy_errors);
  21204. ++
  21205. ++ ret = ath10k_wmi_pull_phyerr(ar, skb, &arg);
  21206. ++ if (ret) {
  21207. ++ ath10k_warn(ar, "failed to parse phyerr event: %d\n", ret);
  21208. ++ return;
  21209. ++ }
  21210. ++
  21211. ++ left_len = __le32_to_cpu(arg.buf_len);
  21212. ++
  21213. ++ /* Check number of included events */
  21214. ++ count = __le32_to_cpu(arg.num_phyerrs);
  21215. ++
  21216. ++ tsf = __le32_to_cpu(arg.tsf_u32);
  21217. ++ tsf <<= 32;
  21218. ++ tsf |= __le32_to_cpu(arg.tsf_l32);
  21219. ++
  21220. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  21221. ++ "wmi event phyerr count %d tsf64 0x%llX\n",
  21222. ++ count, tsf);
  21223. ++
  21224. ++ phyerr = arg.phyerrs;
  21225. ++ for (i = 0; i < count; i++) {
  21226. ++ /* Check if we can read event header */
  21227. ++ if (left_len < sizeof(*phyerr)) {
  21228. ++ ath10k_warn(ar, "single event (%d) wrong head len\n",
  21229. ++ i);
  21230. ++ return;
  21231. ++ }
  21232. ++
  21233. ++ left_len -= sizeof(*phyerr);
  21234. ++
  21235. ++ buf_len = __le32_to_cpu(phyerr->buf_len);
  21236. ++ phy_err_code = phyerr->phy_err_code;
  21237. +
  21238. + if (left_len < buf_len) {
  21239. +- ath10k_warn("single event (%d) wrong buf len\n", i);
  21240. ++ ath10k_warn(ar, "single event (%d) wrong buf len\n", i);
  21241. + return;
  21242. + }
  21243. +
  21244. +@@ -1698,36 +2853,34 @@ static void ath10k_wmi_event_phyerr(stru
  21245. +
  21246. + switch (phy_err_code) {
  21247. + case PHY_ERROR_RADAR:
  21248. +- ath10k_wmi_event_dfs(ar, event, tsf);
  21249. ++ ath10k_wmi_event_dfs(ar, phyerr, tsf);
  21250. + break;
  21251. + case PHY_ERROR_SPECTRAL_SCAN:
  21252. +- ath10k_wmi_event_spectral_scan(ar, event, tsf);
  21253. ++ ath10k_wmi_event_spectral_scan(ar, phyerr, tsf);
  21254. + break;
  21255. + case PHY_ERROR_FALSE_RADAR_EXT:
  21256. +- ath10k_wmi_event_dfs(ar, event, tsf);
  21257. +- ath10k_wmi_event_spectral_scan(ar, event, tsf);
  21258. ++ ath10k_wmi_event_dfs(ar, phyerr, tsf);
  21259. ++ ath10k_wmi_event_spectral_scan(ar, phyerr, tsf);
  21260. + break;
  21261. + default:
  21262. + break;
  21263. + }
  21264. +
  21265. +- event += sizeof(*event) + buf_len;
  21266. ++ phyerr = (void *)phyerr + sizeof(*phyerr) + buf_len;
  21267. + }
  21268. + }
  21269. +
  21270. +-static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
  21271. ++void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
  21272. + {
  21273. +- ath10k_dbg(ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n");
  21274. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n");
  21275. + }
  21276. +
  21277. +-static void ath10k_wmi_event_profile_match(struct ath10k *ar,
  21278. +- struct sk_buff *skb)
  21279. ++void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb)
  21280. + {
  21281. +- ath10k_dbg(ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
  21282. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
  21283. + }
  21284. +
  21285. +-static void ath10k_wmi_event_debug_print(struct ath10k *ar,
  21286. +- struct sk_buff *skb)
  21287. ++void ath10k_wmi_event_debug_print(struct ath10k *ar, struct sk_buff *skb)
  21288. + {
  21289. + char buf[101], c;
  21290. + int i;
  21291. +@@ -1748,7 +2901,7 @@ static void ath10k_wmi_event_debug_print
  21292. + }
  21293. +
  21294. + if (i == sizeof(buf) - 1)
  21295. +- ath10k_warn("wmi debug print truncated: %d\n", skb->len);
  21296. ++ ath10k_warn(ar, "wmi debug print truncated: %d\n", skb->len);
  21297. +
  21298. + /* for some reason the debug prints end with \n, remove that */
  21299. + if (skb->data[i - 1] == '\n')
  21300. +@@ -1757,112 +2910,99 @@ static void ath10k_wmi_event_debug_print
  21301. + /* the last byte is always reserved for the null character */
  21302. + buf[i] = '\0';
  21303. +
  21304. +- ath10k_dbg(ATH10K_DBG_WMI, "wmi event debug print '%s'\n", buf);
  21305. ++ ath10k_dbg(ar, ATH10K_DBG_WMI_PRINT, "wmi print '%s'\n", buf);
  21306. + }
  21307. +
  21308. +-static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
  21309. ++void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
  21310. + {
  21311. +- ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
  21312. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
  21313. + }
  21314. +
  21315. +-static void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar,
  21316. +- struct sk_buff *skb)
  21317. ++void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, struct sk_buff *skb)
  21318. + {
  21319. +- ath10k_dbg(ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
  21320. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
  21321. + }
  21322. +
  21323. +-static void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
  21324. ++void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
  21325. + struct sk_buff *skb)
  21326. + {
  21327. +- ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
  21328. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
  21329. + }
  21330. +
  21331. +-static void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
  21332. ++void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
  21333. + struct sk_buff *skb)
  21334. + {
  21335. +- ath10k_dbg(ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
  21336. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
  21337. + }
  21338. +
  21339. +-static void ath10k_wmi_event_rtt_error_report(struct ath10k *ar,
  21340. +- struct sk_buff *skb)
  21341. ++void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb)
  21342. + {
  21343. +- ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
  21344. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
  21345. + }
  21346. +
  21347. +-static void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar,
  21348. +- struct sk_buff *skb)
  21349. ++void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb)
  21350. + {
  21351. +- ath10k_dbg(ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n");
  21352. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n");
  21353. + }
  21354. +
  21355. +-static void ath10k_wmi_event_dcs_interference(struct ath10k *ar,
  21356. +- struct sk_buff *skb)
  21357. ++void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb)
  21358. + {
  21359. +- ath10k_dbg(ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
  21360. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
  21361. + }
  21362. +
  21363. +-static void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar,
  21364. +- struct sk_buff *skb)
  21365. ++void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
  21366. + {
  21367. +- ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n");
  21368. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n");
  21369. + }
  21370. +
  21371. +-static void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar,
  21372. +- struct sk_buff *skb)
  21373. ++void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb)
  21374. + {
  21375. +- ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
  21376. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
  21377. + }
  21378. +
  21379. +-static void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar,
  21380. +- struct sk_buff *skb)
  21381. ++void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar, struct sk_buff *skb)
  21382. + {
  21383. +- ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
  21384. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
  21385. + }
  21386. +
  21387. +-static void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar,
  21388. +- struct sk_buff *skb)
  21389. ++void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, struct sk_buff *skb)
  21390. + {
  21391. +- ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
  21392. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
  21393. + }
  21394. +
  21395. +-static void ath10k_wmi_event_delba_complete(struct ath10k *ar,
  21396. +- struct sk_buff *skb)
  21397. ++void ath10k_wmi_event_delba_complete(struct ath10k *ar, struct sk_buff *skb)
  21398. + {
  21399. +- ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
  21400. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
  21401. + }
  21402. +
  21403. +-static void ath10k_wmi_event_addba_complete(struct ath10k *ar,
  21404. +- struct sk_buff *skb)
  21405. ++void ath10k_wmi_event_addba_complete(struct ath10k *ar, struct sk_buff *skb)
  21406. + {
  21407. +- ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
  21408. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
  21409. + }
  21410. +
  21411. +-static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
  21412. ++void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
  21413. + struct sk_buff *skb)
  21414. + {
  21415. +- ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
  21416. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
  21417. + }
  21418. +
  21419. +-static void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar,
  21420. +- struct sk_buff *skb)
  21421. ++void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, struct sk_buff *skb)
  21422. + {
  21423. +- ath10k_dbg(ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n");
  21424. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n");
  21425. + }
  21426. +
  21427. +-static void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar,
  21428. +- struct sk_buff *skb)
  21429. ++void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb)
  21430. + {
  21431. +- ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n");
  21432. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n");
  21433. + }
  21434. +
  21435. +-static void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar,
  21436. +- struct sk_buff *skb)
  21437. ++void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb)
  21438. + {
  21439. +- ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
  21440. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
  21441. + }
  21442. +
  21443. + static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
  21444. +- u32 num_units, u32 unit_len)
  21445. ++ u32 num_units, u32 unit_len)
  21446. + {
  21447. + dma_addr_t paddr;
  21448. + u32 pool_size;
  21449. +@@ -1878,7 +3018,7 @@ static int ath10k_wmi_alloc_host_mem(str
  21450. + &paddr,
  21451. + GFP_ATOMIC);
  21452. + if (!ar->wmi.mem_chunks[idx].vaddr) {
  21453. +- ath10k_warn("failed to allocate memory chunk\n");
  21454. ++ ath10k_warn(ar, "failed to allocate memory chunk\n");
  21455. + return -ENOMEM;
  21456. + }
  21457. +
  21458. +@@ -1892,45 +3032,124 @@ static int ath10k_wmi_alloc_host_mem(str
  21459. + return 0;
  21460. + }
  21461. +
  21462. +-static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
  21463. +- struct sk_buff *skb)
  21464. ++static int
  21465. ++ath10k_wmi_main_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
  21466. ++ struct wmi_svc_rdy_ev_arg *arg)
  21467. ++{
  21468. ++ struct wmi_service_ready_event *ev;
  21469. ++ size_t i, n;
  21470. ++
  21471. ++ if (skb->len < sizeof(*ev))
  21472. ++ return -EPROTO;
  21473. ++
  21474. ++ ev = (void *)skb->data;
  21475. ++ skb_pull(skb, sizeof(*ev));
  21476. ++ arg->min_tx_power = ev->hw_min_tx_power;
  21477. ++ arg->max_tx_power = ev->hw_max_tx_power;
  21478. ++ arg->ht_cap = ev->ht_cap_info;
  21479. ++ arg->vht_cap = ev->vht_cap_info;
  21480. ++ arg->sw_ver0 = ev->sw_version;
  21481. ++ arg->sw_ver1 = ev->sw_version_1;
  21482. ++ arg->phy_capab = ev->phy_capability;
  21483. ++ arg->num_rf_chains = ev->num_rf_chains;
  21484. ++ arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
  21485. ++ arg->num_mem_reqs = ev->num_mem_reqs;
  21486. ++ arg->service_map = ev->wmi_service_bitmap;
  21487. ++ arg->service_map_len = sizeof(ev->wmi_service_bitmap);
  21488. ++
  21489. ++ n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
  21490. ++ ARRAY_SIZE(arg->mem_reqs));
  21491. ++ for (i = 0; i < n; i++)
  21492. ++ arg->mem_reqs[i] = &ev->mem_reqs[i];
  21493. ++
  21494. ++ if (skb->len <
  21495. ++ __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
  21496. ++ return -EPROTO;
  21497. ++
  21498. ++ return 0;
  21499. ++}
  21500. ++
  21501. ++static int
  21502. ++ath10k_wmi_10x_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
  21503. ++ struct wmi_svc_rdy_ev_arg *arg)
  21504. ++{
  21505. ++ struct wmi_10x_service_ready_event *ev;
  21506. ++ int i, n;
  21507. ++
  21508. ++ if (skb->len < sizeof(*ev))
  21509. ++ return -EPROTO;
  21510. ++
  21511. ++ ev = (void *)skb->data;
  21512. ++ skb_pull(skb, sizeof(*ev));
  21513. ++ arg->min_tx_power = ev->hw_min_tx_power;
  21514. ++ arg->max_tx_power = ev->hw_max_tx_power;
  21515. ++ arg->ht_cap = ev->ht_cap_info;
  21516. ++ arg->vht_cap = ev->vht_cap_info;
  21517. ++ arg->sw_ver0 = ev->sw_version;
  21518. ++ arg->phy_capab = ev->phy_capability;
  21519. ++ arg->num_rf_chains = ev->num_rf_chains;
  21520. ++ arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
  21521. ++ arg->num_mem_reqs = ev->num_mem_reqs;
  21522. ++ arg->service_map = ev->wmi_service_bitmap;
  21523. ++ arg->service_map_len = sizeof(ev->wmi_service_bitmap);
  21524. ++
  21525. ++ n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
  21526. ++ ARRAY_SIZE(arg->mem_reqs));
  21527. ++ for (i = 0; i < n; i++)
  21528. ++ arg->mem_reqs[i] = &ev->mem_reqs[i];
  21529. ++
  21530. ++ if (skb->len <
  21531. ++ __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
  21532. ++ return -EPROTO;
  21533. ++
  21534. ++ return 0;
  21535. ++}
  21536. ++
  21537. ++void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
  21538. + {
  21539. +- struct wmi_service_ready_event *ev = (void *)skb->data;
  21540. ++ struct wmi_svc_rdy_ev_arg arg = {};
  21541. ++ u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
  21542. ++ int ret;
  21543. +
  21544. +- if (skb->len < sizeof(*ev)) {
  21545. +- ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
  21546. +- skb->len, sizeof(*ev));
  21547. ++ ret = ath10k_wmi_pull_svc_rdy(ar, skb, &arg);
  21548. ++ if (ret) {
  21549. ++ ath10k_warn(ar, "failed to parse service ready: %d\n", ret);
  21550. + return;
  21551. + }
  21552. +
  21553. +- ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power);
  21554. +- ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power);
  21555. +- ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info);
  21556. +- ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info);
  21557. ++ memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map));
  21558. ++ ath10k_wmi_map_svc(ar, arg.service_map, ar->wmi.svc_map,
  21559. ++ arg.service_map_len);
  21560. ++
  21561. ++ ar->hw_min_tx_power = __le32_to_cpu(arg.min_tx_power);
  21562. ++ ar->hw_max_tx_power = __le32_to_cpu(arg.max_tx_power);
  21563. ++ ar->ht_cap_info = __le32_to_cpu(arg.ht_cap);
  21564. ++ ar->vht_cap_info = __le32_to_cpu(arg.vht_cap);
  21565. + ar->fw_version_major =
  21566. +- (__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24;
  21567. +- ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff);
  21568. ++ (__le32_to_cpu(arg.sw_ver0) & 0xff000000) >> 24;
  21569. ++ ar->fw_version_minor = (__le32_to_cpu(arg.sw_ver0) & 0x00ffffff);
  21570. + ar->fw_version_release =
  21571. +- (__le32_to_cpu(ev->sw_version_1) & 0xffff0000) >> 16;
  21572. +- ar->fw_version_build = (__le32_to_cpu(ev->sw_version_1) & 0x0000ffff);
  21573. +- ar->phy_capability = __le32_to_cpu(ev->phy_capability);
  21574. +- ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
  21575. ++ (__le32_to_cpu(arg.sw_ver1) & 0xffff0000) >> 16;
  21576. ++ ar->fw_version_build = (__le32_to_cpu(arg.sw_ver1) & 0x0000ffff);
  21577. ++ ar->phy_capability = __le32_to_cpu(arg.phy_capab);
  21578. ++ ar->num_rf_chains = __le32_to_cpu(arg.num_rf_chains);
  21579. ++ ar->ath_common.regulatory.current_rd = __le32_to_cpu(arg.eeprom_rd);
  21580. ++
  21581. ++ ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
  21582. ++ arg.service_map, arg.service_map_len);
  21583. +
  21584. + /* only manually set fw features when not using FW IE format */
  21585. + if (ar->fw_api == 1 && ar->fw_version_build > 636)
  21586. + set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);
  21587. +
  21588. + if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
  21589. +- ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n",
  21590. ++ ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
  21591. + ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
  21592. + ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
  21593. + }
  21594. +
  21595. +- ar->ath_common.regulatory.current_rd =
  21596. +- __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);
  21597. +-
  21598. +- ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap,
  21599. +- sizeof(ev->wmi_service_bitmap));
  21600. ++ ar->supp_tx_chainmask = (1 << ar->num_rf_chains) - 1;
  21601. ++ ar->supp_rx_chainmask = (1 << ar->num_rf_chains) - 1;
  21602. +
  21603. + if (strlen(ar->hw->wiphy->fw_version) == 0) {
  21604. + snprintf(ar->hw->wiphy->fw_version,
  21605. +@@ -1942,90 +3161,18 @@ static void ath10k_wmi_service_ready_eve
  21606. + ar->fw_version_build);
  21607. + }
  21608. +
  21609. +- /* FIXME: it probably should be better to support this */
  21610. +- if (__le32_to_cpu(ev->num_mem_reqs) > 0) {
  21611. +- ath10k_warn("target requested %d memory chunks; ignoring\n",
  21612. +- __le32_to_cpu(ev->num_mem_reqs));
  21613. +- }
  21614. +-
  21615. +- ath10k_dbg(ATH10K_DBG_WMI,
  21616. +- "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n",
  21617. +- __le32_to_cpu(ev->sw_version),
  21618. +- __le32_to_cpu(ev->sw_version_1),
  21619. +- __le32_to_cpu(ev->abi_version),
  21620. +- __le32_to_cpu(ev->phy_capability),
  21621. +- __le32_to_cpu(ev->ht_cap_info),
  21622. +- __le32_to_cpu(ev->vht_cap_info),
  21623. +- __le32_to_cpu(ev->vht_supp_mcs),
  21624. +- __le32_to_cpu(ev->sys_cap_info),
  21625. +- __le32_to_cpu(ev->num_mem_reqs),
  21626. +- __le32_to_cpu(ev->num_rf_chains));
  21627. +-
  21628. +- complete(&ar->wmi.service_ready);
  21629. +-}
  21630. +-
  21631. +-static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar,
  21632. +- struct sk_buff *skb)
  21633. +-{
  21634. +- u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
  21635. +- int ret;
  21636. +- struct wmi_service_ready_event_10x *ev = (void *)skb->data;
  21637. +-
  21638. +- if (skb->len < sizeof(*ev)) {
  21639. +- ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
  21640. +- skb->len, sizeof(*ev));
  21641. +- return;
  21642. +- }
  21643. +-
  21644. +- ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power);
  21645. +- ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power);
  21646. +- ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info);
  21647. +- ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info);
  21648. +- ar->fw_version_major =
  21649. +- (__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24;
  21650. +- ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff);
  21651. +- ar->phy_capability = __le32_to_cpu(ev->phy_capability);
  21652. +- ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
  21653. +-
  21654. +- if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
  21655. +- ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n",
  21656. +- ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
  21657. +- ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
  21658. +- }
  21659. +-
  21660. +- ar->ath_common.regulatory.current_rd =
  21661. +- __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);
  21662. +-
  21663. +- ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap,
  21664. +- sizeof(ev->wmi_service_bitmap));
  21665. +-
  21666. +- if (strlen(ar->hw->wiphy->fw_version) == 0) {
  21667. +- snprintf(ar->hw->wiphy->fw_version,
  21668. +- sizeof(ar->hw->wiphy->fw_version),
  21669. +- "%u.%u",
  21670. +- ar->fw_version_major,
  21671. +- ar->fw_version_minor);
  21672. +- }
  21673. +-
  21674. +- num_mem_reqs = __le32_to_cpu(ev->num_mem_reqs);
  21675. +-
  21676. +- if (num_mem_reqs > ATH10K_MAX_MEM_REQS) {
  21677. +- ath10k_warn("requested memory chunks number (%d) exceeds the limit\n",
  21678. ++ num_mem_reqs = __le32_to_cpu(arg.num_mem_reqs);
  21679. ++ if (num_mem_reqs > WMI_MAX_MEM_REQS) {
  21680. ++ ath10k_warn(ar, "requested memory chunks number (%d) exceeds the limit\n",
  21681. + num_mem_reqs);
  21682. + return;
  21683. + }
  21684. +
  21685. +- if (!num_mem_reqs)
  21686. +- goto exit;
  21687. +-
  21688. +- ath10k_dbg(ATH10K_DBG_WMI, "firmware has requested %d memory chunks\n",
  21689. +- num_mem_reqs);
  21690. +-
  21691. + for (i = 0; i < num_mem_reqs; ++i) {
  21692. +- req_id = __le32_to_cpu(ev->mem_reqs[i].req_id);
  21693. +- num_units = __le32_to_cpu(ev->mem_reqs[i].num_units);
  21694. +- unit_size = __le32_to_cpu(ev->mem_reqs[i].unit_size);
  21695. +- num_unit_info = __le32_to_cpu(ev->mem_reqs[i].num_unit_info);
  21696. ++ req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id);
  21697. ++ num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units);
  21698. ++ unit_size = __le32_to_cpu(arg.mem_reqs[i]->unit_size);
  21699. ++ num_unit_info = __le32_to_cpu(arg.mem_reqs[i]->num_unit_info);
  21700. +
  21701. + if (num_unit_info & NUM_UNITS_IS_NUM_PEERS)
  21702. + /* number of units to allocate is number of
  21703. +@@ -2036,10 +3183,10 @@ static void ath10k_wmi_10x_service_ready
  21704. + else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS)
  21705. + num_units = TARGET_10X_NUM_VDEVS + 1;
  21706. +
  21707. +- ath10k_dbg(ATH10K_DBG_WMI,
  21708. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  21709. + "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
  21710. + req_id,
  21711. +- __le32_to_cpu(ev->mem_reqs[i].num_units),
  21712. ++ __le32_to_cpu(arg.mem_reqs[i]->num_units),
  21713. + num_unit_info,
  21714. + unit_size,
  21715. + num_units);
  21716. +@@ -2050,47 +3197,79 @@ static void ath10k_wmi_10x_service_ready
  21717. + return;
  21718. + }
  21719. +
  21720. +-exit:
  21721. +- ath10k_dbg(ATH10K_DBG_WMI,
  21722. +- "wmi event service ready sw_ver 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n",
  21723. +- __le32_to_cpu(ev->sw_version),
  21724. +- __le32_to_cpu(ev->abi_version),
  21725. +- __le32_to_cpu(ev->phy_capability),
  21726. +- __le32_to_cpu(ev->ht_cap_info),
  21727. +- __le32_to_cpu(ev->vht_cap_info),
  21728. +- __le32_to_cpu(ev->vht_supp_mcs),
  21729. +- __le32_to_cpu(ev->sys_cap_info),
  21730. +- __le32_to_cpu(ev->num_mem_reqs),
  21731. +- __le32_to_cpu(ev->num_rf_chains));
  21732. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  21733. ++ "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x fw_build 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x num_mem_reqs 0x%08x\n",
  21734. ++ __le32_to_cpu(arg.min_tx_power),
  21735. ++ __le32_to_cpu(arg.max_tx_power),
  21736. ++ __le32_to_cpu(arg.ht_cap),
  21737. ++ __le32_to_cpu(arg.vht_cap),
  21738. ++ __le32_to_cpu(arg.sw_ver0),
  21739. ++ __le32_to_cpu(arg.sw_ver1),
  21740. ++ __le32_to_cpu(arg.fw_build),
  21741. ++ __le32_to_cpu(arg.phy_capab),
  21742. ++ __le32_to_cpu(arg.num_rf_chains),
  21743. ++ __le32_to_cpu(arg.eeprom_rd),
  21744. ++ __le32_to_cpu(arg.num_mem_reqs));
  21745. +
  21746. + complete(&ar->wmi.service_ready);
  21747. + }
  21748. +
  21749. +-static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
  21750. ++static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
  21751. ++ struct wmi_rdy_ev_arg *arg)
  21752. + {
  21753. +- struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data;
  21754. ++ struct wmi_ready_event *ev = (void *)skb->data;
  21755. +
  21756. +- if (WARN_ON(skb->len < sizeof(*ev)))
  21757. +- return -EINVAL;
  21758. ++ if (skb->len < sizeof(*ev))
  21759. ++ return -EPROTO;
  21760. ++
  21761. ++ skb_pull(skb, sizeof(*ev));
  21762. ++ arg->sw_version = ev->sw_version;
  21763. ++ arg->abi_version = ev->abi_version;
  21764. ++ arg->status = ev->status;
  21765. ++ arg->mac_addr = ev->mac_addr.addr;
  21766. ++
  21767. ++ return 0;
  21768. ++}
  21769. ++
  21770. ++int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
  21771. ++{
  21772. ++ struct wmi_rdy_ev_arg arg = {};
  21773. ++ int ret;
  21774. +
  21775. +- memcpy(ar->mac_addr, ev->mac_addr.addr, ETH_ALEN);
  21776. ++ ret = ath10k_wmi_pull_rdy(ar, skb, &arg);
  21777. ++ if (ret) {
  21778. ++ ath10k_warn(ar, "failed to parse ready event: %d\n", ret);
  21779. ++ return ret;
  21780. ++ }
  21781. +
  21782. +- ath10k_dbg(ATH10K_DBG_WMI,
  21783. +- "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d skb->len %i ev-sz %zu\n",
  21784. +- __le32_to_cpu(ev->sw_version),
  21785. +- __le32_to_cpu(ev->abi_version),
  21786. +- ev->mac_addr.addr,
  21787. +- __le32_to_cpu(ev->status), skb->len, sizeof(*ev));
  21788. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  21789. ++ "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d\n",
  21790. ++ __le32_to_cpu(arg.sw_version),
  21791. ++ __le32_to_cpu(arg.abi_version),
  21792. ++ arg.mac_addr,
  21793. ++ __le32_to_cpu(arg.status));
  21794. +
  21795. ++ ether_addr_copy(ar->mac_addr, arg.mac_addr);
  21796. + complete(&ar->wmi.unified_ready);
  21797. + return 0;
  21798. + }
  21799. +
  21800. +-static void ath10k_wmi_main_process_rx(struct ath10k *ar, struct sk_buff *skb)
  21801. ++static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb)
  21802. ++{
  21803. ++ const struct wmi_pdev_temperature_event *ev;
  21804. ++
  21805. ++ ev = (struct wmi_pdev_temperature_event *)skb->data;
  21806. ++ if (WARN_ON(skb->len < sizeof(*ev)))
  21807. ++ return -EPROTO;
  21808. ++
  21809. ++ ath10k_thermal_event_temperature(ar, __le32_to_cpu(ev->temperature));
  21810. ++ return 0;
  21811. ++}
  21812. ++
  21813. ++static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
  21814. + {
  21815. + struct wmi_cmd_hdr *cmd_hdr;
  21816. + enum wmi_event_id id;
  21817. +- u16 len;
  21818. +
  21819. + cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
  21820. + id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
  21821. +@@ -2098,9 +3277,7 @@ static void ath10k_wmi_main_process_rx(s
  21822. + if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
  21823. + return;
  21824. +
  21825. +- len = skb->len;
  21826. +-
  21827. +- trace_ath10k_wmi_event(id, skb->data, skb->len);
  21828. ++ trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
  21829. +
  21830. + switch (id) {
  21831. + case WMI_MGMT_RX_EVENTID:
  21832. +@@ -2192,24 +3369,24 @@ static void ath10k_wmi_main_process_rx(s
  21833. + ath10k_wmi_event_vdev_install_key_complete(ar, skb);
  21834. + break;
  21835. + case WMI_SERVICE_READY_EVENTID:
  21836. +- ath10k_wmi_service_ready_event_rx(ar, skb);
  21837. ++ ath10k_wmi_event_service_ready(ar, skb);
  21838. + break;
  21839. + case WMI_READY_EVENTID:
  21840. +- ath10k_wmi_ready_event_rx(ar, skb);
  21841. ++ ath10k_wmi_event_ready(ar, skb);
  21842. + break;
  21843. + default:
  21844. +- ath10k_warn("Unknown eventid: %d\n", id);
  21845. ++ ath10k_warn(ar, "Unknown eventid: %d\n", id);
  21846. + break;
  21847. + }
  21848. +
  21849. + dev_kfree_skb(skb);
  21850. + }
  21851. +
  21852. +-static void ath10k_wmi_10x_process_rx(struct ath10k *ar, struct sk_buff *skb)
  21853. ++static void ath10k_wmi_10_1_op_rx(struct ath10k *ar, struct sk_buff *skb)
  21854. + {
  21855. + struct wmi_cmd_hdr *cmd_hdr;
  21856. + enum wmi_10x_event_id id;
  21857. +- u16 len;
  21858. ++ bool consumed;
  21859. +
  21860. + cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
  21861. + id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
  21862. +@@ -2217,9 +3394,19 @@ static void ath10k_wmi_10x_process_rx(st
  21863. + if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
  21864. + return;
  21865. +
  21866. +- len = skb->len;
  21867. ++ trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
  21868. ++
  21869. ++ consumed = ath10k_tm_event_wmi(ar, id, skb);
  21870. +
  21871. +- trace_ath10k_wmi_event(id, skb->data, skb->len);
  21872. ++ /* Ready event must be handled normally also in UTF mode so that we
  21873. ++ * know the UTF firmware has booted, others we are just bypass WMI
  21874. ++ * events to testmode.
  21875. ++ */
  21876. ++ if (consumed && id != WMI_10X_READY_EVENTID) {
  21877. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  21878. ++ "wmi testmode consumed 0x%x\n", id);
  21879. ++ goto out;
  21880. ++ }
  21881. +
  21882. + switch (id) {
  21883. + case WMI_10X_MGMT_RX_EVENTID:
  21884. +@@ -2302,64 +3489,153 @@ static void ath10k_wmi_10x_process_rx(st
  21885. + ath10k_wmi_event_vdev_resume_req(ar, skb);
  21886. + break;
  21887. + case WMI_10X_SERVICE_READY_EVENTID:
  21888. +- ath10k_wmi_10x_service_ready_event_rx(ar, skb);
  21889. ++ ath10k_wmi_event_service_ready(ar, skb);
  21890. + break;
  21891. + case WMI_10X_READY_EVENTID:
  21892. +- ath10k_wmi_ready_event_rx(ar, skb);
  21893. ++ ath10k_wmi_event_ready(ar, skb);
  21894. ++ break;
  21895. ++ case WMI_10X_PDEV_UTF_EVENTID:
  21896. ++ /* ignore utf events */
  21897. + break;
  21898. + default:
  21899. +- ath10k_warn("Unknown eventid: %d\n", id);
  21900. ++ ath10k_warn(ar, "Unknown eventid: %d\n", id);
  21901. + break;
  21902. + }
  21903. +
  21904. ++out:
  21905. + dev_kfree_skb(skb);
  21906. + }
  21907. +
  21908. +-
  21909. +-static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
  21910. ++static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
  21911. + {
  21912. +- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
  21913. +- ath10k_wmi_10x_process_rx(ar, skb);
  21914. +- else
  21915. +- ath10k_wmi_main_process_rx(ar, skb);
  21916. +-}
  21917. ++ struct wmi_cmd_hdr *cmd_hdr;
  21918. ++ enum wmi_10_2_event_id id;
  21919. +
  21920. +-/* WMI Initialization functions */
  21921. +-int ath10k_wmi_attach(struct ath10k *ar)
  21922. +-{
  21923. +- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
  21924. +- ar->wmi.cmd = &wmi_10x_cmd_map;
  21925. +- ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
  21926. +- ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
  21927. +- } else {
  21928. +- ar->wmi.cmd = &wmi_cmd_map;
  21929. +- ar->wmi.vdev_param = &wmi_vdev_param_map;
  21930. +- ar->wmi.pdev_param = &wmi_pdev_param_map;
  21931. +- }
  21932. ++ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
  21933. ++ id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
  21934. +
  21935. +- init_completion(&ar->wmi.service_ready);
  21936. +- init_completion(&ar->wmi.unified_ready);
  21937. +- init_waitqueue_head(&ar->wmi.tx_credits_wq);
  21938. ++ if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
  21939. ++ return;
  21940. +
  21941. +- return 0;
  21942. ++ trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
  21943. ++
  21944. ++ switch (id) {
  21945. ++ case WMI_10_2_MGMT_RX_EVENTID:
  21946. ++ ath10k_wmi_event_mgmt_rx(ar, skb);
  21947. ++ /* mgmt_rx() owns the skb now! */
  21948. ++ return;
  21949. ++ case WMI_10_2_SCAN_EVENTID:
  21950. ++ ath10k_wmi_event_scan(ar, skb);
  21951. ++ break;
  21952. ++ case WMI_10_2_CHAN_INFO_EVENTID:
  21953. ++ ath10k_wmi_event_chan_info(ar, skb);
  21954. ++ break;
  21955. ++ case WMI_10_2_ECHO_EVENTID:
  21956. ++ ath10k_wmi_event_echo(ar, skb);
  21957. ++ break;
  21958. ++ case WMI_10_2_DEBUG_MESG_EVENTID:
  21959. ++ ath10k_wmi_event_debug_mesg(ar, skb);
  21960. ++ break;
  21961. ++ case WMI_10_2_UPDATE_STATS_EVENTID:
  21962. ++ ath10k_wmi_event_update_stats(ar, skb);
  21963. ++ break;
  21964. ++ case WMI_10_2_VDEV_START_RESP_EVENTID:
  21965. ++ ath10k_wmi_event_vdev_start_resp(ar, skb);
  21966. ++ break;
  21967. ++ case WMI_10_2_VDEV_STOPPED_EVENTID:
  21968. ++ ath10k_wmi_event_vdev_stopped(ar, skb);
  21969. ++ break;
  21970. ++ case WMI_10_2_PEER_STA_KICKOUT_EVENTID:
  21971. ++ ath10k_wmi_event_peer_sta_kickout(ar, skb);
  21972. ++ break;
  21973. ++ case WMI_10_2_HOST_SWBA_EVENTID:
  21974. ++ ath10k_wmi_event_host_swba(ar, skb);
  21975. ++ break;
  21976. ++ case WMI_10_2_TBTTOFFSET_UPDATE_EVENTID:
  21977. ++ ath10k_wmi_event_tbttoffset_update(ar, skb);
  21978. ++ break;
  21979. ++ case WMI_10_2_PHYERR_EVENTID:
  21980. ++ ath10k_wmi_event_phyerr(ar, skb);
  21981. ++ break;
  21982. ++ case WMI_10_2_ROAM_EVENTID:
  21983. ++ ath10k_wmi_event_roam(ar, skb);
  21984. ++ break;
  21985. ++ case WMI_10_2_PROFILE_MATCH:
  21986. ++ ath10k_wmi_event_profile_match(ar, skb);
  21987. ++ break;
  21988. ++ case WMI_10_2_DEBUG_PRINT_EVENTID:
  21989. ++ ath10k_wmi_event_debug_print(ar, skb);
  21990. ++ break;
  21991. ++ case WMI_10_2_PDEV_QVIT_EVENTID:
  21992. ++ ath10k_wmi_event_pdev_qvit(ar, skb);
  21993. ++ break;
  21994. ++ case WMI_10_2_WLAN_PROFILE_DATA_EVENTID:
  21995. ++ ath10k_wmi_event_wlan_profile_data(ar, skb);
  21996. ++ break;
  21997. ++ case WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID:
  21998. ++ ath10k_wmi_event_rtt_measurement_report(ar, skb);
  21999. ++ break;
  22000. ++ case WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID:
  22001. ++ ath10k_wmi_event_tsf_measurement_report(ar, skb);
  22002. ++ break;
  22003. ++ case WMI_10_2_RTT_ERROR_REPORT_EVENTID:
  22004. ++ ath10k_wmi_event_rtt_error_report(ar, skb);
  22005. ++ break;
  22006. ++ case WMI_10_2_WOW_WAKEUP_HOST_EVENTID:
  22007. ++ ath10k_wmi_event_wow_wakeup_host(ar, skb);
  22008. ++ break;
  22009. ++ case WMI_10_2_DCS_INTERFERENCE_EVENTID:
  22010. ++ ath10k_wmi_event_dcs_interference(ar, skb);
  22011. ++ break;
  22012. ++ case WMI_10_2_PDEV_TPC_CONFIG_EVENTID:
  22013. ++ ath10k_wmi_event_pdev_tpc_config(ar, skb);
  22014. ++ break;
  22015. ++ case WMI_10_2_INST_RSSI_STATS_EVENTID:
  22016. ++ ath10k_wmi_event_inst_rssi_stats(ar, skb);
  22017. ++ break;
  22018. ++ case WMI_10_2_VDEV_STANDBY_REQ_EVENTID:
  22019. ++ ath10k_wmi_event_vdev_standby_req(ar, skb);
  22020. ++ break;
  22021. ++ case WMI_10_2_VDEV_RESUME_REQ_EVENTID:
  22022. ++ ath10k_wmi_event_vdev_resume_req(ar, skb);
  22023. ++ break;
  22024. ++ case WMI_10_2_SERVICE_READY_EVENTID:
  22025. ++ ath10k_wmi_event_service_ready(ar, skb);
  22026. ++ break;
  22027. ++ case WMI_10_2_READY_EVENTID:
  22028. ++ ath10k_wmi_event_ready(ar, skb);
  22029. ++ break;
  22030. ++ case WMI_10_2_PDEV_TEMPERATURE_EVENTID:
  22031. ++ ath10k_wmi_event_temperature(ar, skb);
  22032. ++ break;
  22033. ++ case WMI_10_2_RTT_KEEPALIVE_EVENTID:
  22034. ++ case WMI_10_2_GPIO_INPUT_EVENTID:
  22035. ++ case WMI_10_2_PEER_RATECODE_LIST_EVENTID:
  22036. ++ case WMI_10_2_GENERIC_BUFFER_EVENTID:
  22037. ++ case WMI_10_2_MCAST_BUF_RELEASE_EVENTID:
  22038. ++ case WMI_10_2_MCAST_LIST_AGEOUT_EVENTID:
  22039. ++ case WMI_10_2_WDS_PEER_EVENTID:
  22040. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  22041. ++ "received event id %d not implemented\n", id);
  22042. ++ break;
  22043. ++ default:
  22044. ++ ath10k_warn(ar, "Unknown eventid: %d\n", id);
  22045. ++ break;
  22046. ++ }
  22047. ++
  22048. ++ dev_kfree_skb(skb);
  22049. + }
  22050. +
  22051. +-void ath10k_wmi_detach(struct ath10k *ar)
  22052. ++static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
  22053. + {
  22054. +- int i;
  22055. +-
  22056. +- /* free the host memory chunks requested by firmware */
  22057. +- for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
  22058. +- dma_free_coherent(ar->dev,
  22059. +- ar->wmi.mem_chunks[i].len,
  22060. +- ar->wmi.mem_chunks[i].vaddr,
  22061. +- ar->wmi.mem_chunks[i].paddr);
  22062. +- }
  22063. ++ int ret;
  22064. +
  22065. +- ar->wmi.num_mem_chunks = 0;
  22066. ++ ret = ath10k_wmi_rx(ar, skb);
  22067. ++ if (ret)
  22068. ++ ath10k_warn(ar, "failed to process wmi rx: %d\n", ret);
  22069. + }
  22070. +
  22071. +-int ath10k_wmi_connect_htc_service(struct ath10k *ar)
  22072. ++int ath10k_wmi_connect(struct ath10k *ar)
  22073. + {
  22074. + int status;
  22075. + struct ath10k_htc_svc_conn_req conn_req;
  22076. +@@ -2378,7 +3654,7 @@ int ath10k_wmi_connect_htc_service(struc
  22077. +
  22078. + status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
  22079. + if (status) {
  22080. +- ath10k_warn("failed to connect to WMI CONTROL service status: %d\n",
  22081. ++ ath10k_warn(ar, "failed to connect to WMI CONTROL service status: %d\n",
  22082. + status);
  22083. + return status;
  22084. + }
  22085. +@@ -2387,16 +3663,17 @@ int ath10k_wmi_connect_htc_service(struc
  22086. + return 0;
  22087. + }
  22088. +
  22089. +-static int ath10k_wmi_main_pdev_set_regdomain(struct ath10k *ar, u16 rd,
  22090. +- u16 rd2g, u16 rd5g, u16 ctl2g,
  22091. +- u16 ctl5g)
  22092. ++static struct sk_buff *
  22093. ++ath10k_wmi_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
  22094. ++ u16 ctl2g, u16 ctl5g,
  22095. ++ enum wmi_dfs_region dfs_reg)
  22096. + {
  22097. + struct wmi_pdev_set_regdomain_cmd *cmd;
  22098. + struct sk_buff *skb;
  22099. +
  22100. +- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  22101. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  22102. + if (!skb)
  22103. +- return -ENOMEM;
  22104. ++ return ERR_PTR(-ENOMEM);
  22105. +
  22106. + cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
  22107. + cmd->reg_domain = __cpu_to_le32(rd);
  22108. +@@ -2405,25 +3682,23 @@ static int ath10k_wmi_main_pdev_set_regd
  22109. + cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
  22110. + cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
  22111. +
  22112. +- ath10k_dbg(ATH10K_DBG_WMI,
  22113. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  22114. + "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
  22115. + rd, rd2g, rd5g, ctl2g, ctl5g);
  22116. +-
  22117. +- return ath10k_wmi_cmd_send(ar, skb,
  22118. +- ar->wmi.cmd->pdev_set_regdomain_cmdid);
  22119. ++ return skb;
  22120. + }
  22121. +
  22122. +-static int ath10k_wmi_10x_pdev_set_regdomain(struct ath10k *ar, u16 rd,
  22123. +- u16 rd2g, u16 rd5g,
  22124. +- u16 ctl2g, u16 ctl5g,
  22125. +- enum wmi_dfs_region dfs_reg)
  22126. ++static struct sk_buff *
  22127. ++ath10k_wmi_10x_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16
  22128. ++ rd5g, u16 ctl2g, u16 ctl5g,
  22129. ++ enum wmi_dfs_region dfs_reg)
  22130. + {
  22131. + struct wmi_pdev_set_regdomain_cmd_10x *cmd;
  22132. + struct sk_buff *skb;
  22133. +
  22134. +- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  22135. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  22136. + if (!skb)
  22137. +- return -ENOMEM;
  22138. ++ return ERR_PTR(-ENOMEM);
  22139. +
  22140. + cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data;
  22141. + cmd->reg_domain = __cpu_to_le32(rd);
  22142. +@@ -2433,121 +3708,96 @@ static int ath10k_wmi_10x_pdev_set_regdo
  22143. + cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
  22144. + cmd->dfs_domain = __cpu_to_le32(dfs_reg);
  22145. +
  22146. +- ath10k_dbg(ATH10K_DBG_WMI,
  22147. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  22148. + "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n",
  22149. + rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg);
  22150. +-
  22151. +- return ath10k_wmi_cmd_send(ar, skb,
  22152. +- ar->wmi.cmd->pdev_set_regdomain_cmdid);
  22153. +-}
  22154. +-
  22155. +-int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
  22156. +- u16 rd5g, u16 ctl2g, u16 ctl5g,
  22157. +- enum wmi_dfs_region dfs_reg)
  22158. +-{
  22159. +- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
  22160. +- return ath10k_wmi_10x_pdev_set_regdomain(ar, rd, rd2g, rd5g,
  22161. +- ctl2g, ctl5g, dfs_reg);
  22162. +- else
  22163. +- return ath10k_wmi_main_pdev_set_regdomain(ar, rd, rd2g, rd5g,
  22164. +- ctl2g, ctl5g);
  22165. +-}
  22166. +-
  22167. +-int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
  22168. +- const struct wmi_channel_arg *arg)
  22169. +-{
  22170. +- struct wmi_set_channel_cmd *cmd;
  22171. +- struct sk_buff *skb;
  22172. +- u32 ch_flags = 0;
  22173. +-
  22174. +- if (arg->passive)
  22175. +- return -EINVAL;
  22176. +-
  22177. +- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  22178. +- if (!skb)
  22179. +- return -ENOMEM;
  22180. +-
  22181. +- if (arg->chan_radar)
  22182. +- ch_flags |= WMI_CHAN_FLAG_DFS;
  22183. +-
  22184. +- cmd = (struct wmi_set_channel_cmd *)skb->data;
  22185. +- cmd->chan.mhz = __cpu_to_le32(arg->freq);
  22186. +- cmd->chan.band_center_freq1 = __cpu_to_le32(arg->freq);
  22187. +- cmd->chan.mode = arg->mode;
  22188. +- cmd->chan.flags |= __cpu_to_le32(ch_flags);
  22189. +- cmd->chan.min_power = arg->min_power;
  22190. +- cmd->chan.max_power = arg->max_power;
  22191. +- cmd->chan.reg_power = arg->max_reg_power;
  22192. +- cmd->chan.reg_classid = arg->reg_class_id;
  22193. +- cmd->chan.antenna_max = arg->max_antenna_gain;
  22194. +-
  22195. +- ath10k_dbg(ATH10K_DBG_WMI,
  22196. +- "wmi set channel mode %d freq %d\n",
  22197. +- arg->mode, arg->freq);
  22198. +-
  22199. +- return ath10k_wmi_cmd_send(ar, skb,
  22200. +- ar->wmi.cmd->pdev_set_channel_cmdid);
  22201. ++ return skb;
  22202. + }
  22203. +
  22204. +-int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
  22205. ++static struct sk_buff *
  22206. ++ath10k_wmi_op_gen_pdev_suspend(struct ath10k *ar, u32 suspend_opt)
  22207. + {
  22208. + struct wmi_pdev_suspend_cmd *cmd;
  22209. + struct sk_buff *skb;
  22210. +
  22211. +- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  22212. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  22213. + if (!skb)
  22214. +- return -ENOMEM;
  22215. ++ return ERR_PTR(-ENOMEM);
  22216. +
  22217. + cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
  22218. + cmd->suspend_opt = __cpu_to_le32(suspend_opt);
  22219. +
  22220. +- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
  22221. ++ return skb;
  22222. + }
  22223. +
  22224. +-int ath10k_wmi_pdev_resume_target(struct ath10k *ar)
  22225. ++static struct sk_buff *
  22226. ++ath10k_wmi_op_gen_pdev_resume(struct ath10k *ar)
  22227. + {
  22228. + struct sk_buff *skb;
  22229. +
  22230. +- skb = ath10k_wmi_alloc_skb(0);
  22231. +- if (skb == NULL)
  22232. +- return -ENOMEM;
  22233. ++ skb = ath10k_wmi_alloc_skb(ar, 0);
  22234. ++ if (!skb)
  22235. ++ return ERR_PTR(-ENOMEM);
  22236. +
  22237. +- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
  22238. ++ return skb;
  22239. + }
  22240. +
  22241. +-int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
  22242. ++static struct sk_buff *
  22243. ++ath10k_wmi_op_gen_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
  22244. + {
  22245. + struct wmi_pdev_set_param_cmd *cmd;
  22246. + struct sk_buff *skb;
  22247. +
  22248. + if (id == WMI_PDEV_PARAM_UNSUPPORTED) {
  22249. +- ath10k_warn("pdev param %d not supported by firmware\n", id);
  22250. +- return -EOPNOTSUPP;
  22251. ++ ath10k_warn(ar, "pdev param %d not supported by firmware\n",
  22252. ++ id);
  22253. ++ return ERR_PTR(-EOPNOTSUPP);
  22254. + }
  22255. +
  22256. +- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  22257. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  22258. + if (!skb)
  22259. +- return -ENOMEM;
  22260. ++ return ERR_PTR(-ENOMEM);
  22261. +
  22262. + cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
  22263. + cmd->param_id = __cpu_to_le32(id);
  22264. + cmd->param_value = __cpu_to_le32(value);
  22265. +
  22266. +- ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
  22267. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
  22268. + id, value);
  22269. +- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
  22270. ++ return skb;
  22271. + }
  22272. +
  22273. +-static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
  22274. ++void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
  22275. ++ struct wmi_host_mem_chunks *chunks)
  22276. + {
  22277. +- struct wmi_init_cmd *cmd;
  22278. +- struct sk_buff *buf;
  22279. +- struct wmi_resource_config config = {};
  22280. +- u32 len, val;
  22281. ++ struct host_memory_chunk *chunk;
  22282. + int i;
  22283. +
  22284. +- config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
  22285. +- config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS + TARGET_NUM_VDEVS);
  22286. ++ chunks->count = __cpu_to_le32(ar->wmi.num_mem_chunks);
  22287. ++
  22288. ++ for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
  22289. ++ chunk = &chunks->items[i];
  22290. ++ chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
  22291. ++ chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
  22292. ++ chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
  22293. ++
  22294. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  22295. ++ "wmi chunk %d len %d requested, addr 0x%llx\n",
  22296. ++ i,
  22297. ++ ar->wmi.mem_chunks[i].len,
  22298. ++ (unsigned long long)ar->wmi.mem_chunks[i].paddr);
  22299. ++ }
  22300. ++}
  22301. ++
  22302. ++static struct sk_buff *ath10k_wmi_op_gen_init(struct ath10k *ar)
  22303. ++{
  22304. ++ struct wmi_init_cmd *cmd;
  22305. ++ struct sk_buff *buf;
  22306. ++ struct wmi_resource_config config = {};
  22307. ++ u32 len, val;
  22308. ++
  22309. ++ config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
  22310. ++ config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS);
  22311. + config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS);
  22312. +
  22313. + config.num_offload_reorder_bufs =
  22314. +@@ -2600,50 +3850,25 @@ static int ath10k_wmi_main_cmd_init(stru
  22315. + len = sizeof(*cmd) +
  22316. + (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
  22317. +
  22318. +- buf = ath10k_wmi_alloc_skb(len);
  22319. ++ buf = ath10k_wmi_alloc_skb(ar, len);
  22320. + if (!buf)
  22321. +- return -ENOMEM;
  22322. ++ return ERR_PTR(-ENOMEM);
  22323. +
  22324. + cmd = (struct wmi_init_cmd *)buf->data;
  22325. +
  22326. +- if (ar->wmi.num_mem_chunks == 0) {
  22327. +- cmd->num_host_mem_chunks = 0;
  22328. +- goto out;
  22329. +- }
  22330. +-
  22331. +- ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
  22332. +- ar->wmi.num_mem_chunks);
  22333. +-
  22334. +- cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
  22335. +-
  22336. +- for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
  22337. +- cmd->host_mem_chunks[i].ptr =
  22338. +- __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
  22339. +- cmd->host_mem_chunks[i].size =
  22340. +- __cpu_to_le32(ar->wmi.mem_chunks[i].len);
  22341. +- cmd->host_mem_chunks[i].req_id =
  22342. +- __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
  22343. +-
  22344. +- ath10k_dbg(ATH10K_DBG_WMI,
  22345. +- "wmi chunk %d len %d requested, addr 0x%llx\n",
  22346. +- i,
  22347. +- ar->wmi.mem_chunks[i].len,
  22348. +- (unsigned long long)ar->wmi.mem_chunks[i].paddr);
  22349. +- }
  22350. +-out:
  22351. + memcpy(&cmd->resource_config, &config, sizeof(config));
  22352. ++ ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
  22353. +
  22354. +- ath10k_dbg(ATH10K_DBG_WMI, "wmi init\n");
  22355. +- return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
  22356. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n");
  22357. ++ return buf;
  22358. + }
  22359. +
  22360. +-static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
  22361. ++static struct sk_buff *ath10k_wmi_10_1_op_gen_init(struct ath10k *ar)
  22362. + {
  22363. + struct wmi_init_cmd_10x *cmd;
  22364. + struct sk_buff *buf;
  22365. + struct wmi_resource_config_10x config = {};
  22366. + u32 len, val;
  22367. +- int i;
  22368. +
  22369. + config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
  22370. + config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
  22371. +@@ -2691,101 +3916,132 @@ static int ath10k_wmi_10x_cmd_init(struc
  22372. + len = sizeof(*cmd) +
  22373. + (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
  22374. +
  22375. +- buf = ath10k_wmi_alloc_skb(len);
  22376. ++ buf = ath10k_wmi_alloc_skb(ar, len);
  22377. + if (!buf)
  22378. +- return -ENOMEM;
  22379. ++ return ERR_PTR(-ENOMEM);
  22380. +
  22381. + cmd = (struct wmi_init_cmd_10x *)buf->data;
  22382. +
  22383. +- if (ar->wmi.num_mem_chunks == 0) {
  22384. +- cmd->num_host_mem_chunks = 0;
  22385. +- goto out;
  22386. +- }
  22387. ++ memcpy(&cmd->resource_config, &config, sizeof(config));
  22388. ++ ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
  22389. +
  22390. +- ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
  22391. +- ar->wmi.num_mem_chunks);
  22392. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n");
  22393. ++ return buf;
  22394. ++}
  22395. +
  22396. +- cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
  22397. ++static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
  22398. ++{
  22399. ++ struct wmi_init_cmd_10_2 *cmd;
  22400. ++ struct sk_buff *buf;
  22401. ++ struct wmi_resource_config_10x config = {};
  22402. ++ u32 len, val, features;
  22403. +
  22404. +- for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
  22405. +- cmd->host_mem_chunks[i].ptr =
  22406. +- __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
  22407. +- cmd->host_mem_chunks[i].size =
  22408. +- __cpu_to_le32(ar->wmi.mem_chunks[i].len);
  22409. +- cmd->host_mem_chunks[i].req_id =
  22410. +- __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
  22411. ++ config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
  22412. ++ config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
  22413. ++ config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
  22414. ++ config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
  22415. ++ config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
  22416. ++ config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
  22417. ++ config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
  22418. ++ config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
  22419. ++ config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
  22420. ++ config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
  22421. ++ config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
  22422. ++ config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);
  22423. +
  22424. +- ath10k_dbg(ATH10K_DBG_WMI,
  22425. +- "wmi chunk %d len %d requested, addr 0x%llx\n",
  22426. +- i,
  22427. +- ar->wmi.mem_chunks[i].len,
  22428. +- (unsigned long long)ar->wmi.mem_chunks[i].paddr);
  22429. +- }
  22430. +-out:
  22431. +- memcpy(&cmd->resource_config, &config, sizeof(config));
  22432. ++ config.scan_max_pending_reqs =
  22433. ++ __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
  22434. ++
  22435. ++ config.bmiss_offload_max_vdev =
  22436. ++ __cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
  22437. ++
  22438. ++ config.roam_offload_max_vdev =
  22439. ++ __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
  22440. ++
  22441. ++ config.roam_offload_max_ap_profiles =
  22442. ++ __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
  22443. ++
  22444. ++ config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
  22445. ++ config.num_mcast_table_elems =
  22446. ++ __cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
  22447. ++
  22448. ++ config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
  22449. ++ config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
  22450. ++ config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
  22451. ++ config.dma_burst_size = __cpu_to_le32(TARGET_10_2_DMA_BURST_SIZE);
  22452. ++ config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
  22453. ++
  22454. ++ val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
  22455. ++ config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
  22456. ++
  22457. ++ config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
  22458. ++
  22459. ++ config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
  22460. ++ config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
  22461. +
  22462. +- ath10k_dbg(ATH10K_DBG_WMI, "wmi init 10x\n");
  22463. +- return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
  22464. ++ len = sizeof(*cmd) +
  22465. ++ (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
  22466. ++
  22467. ++ buf = ath10k_wmi_alloc_skb(ar, len);
  22468. ++ if (!buf)
  22469. ++ return ERR_PTR(-ENOMEM);
  22470. ++
  22471. ++ cmd = (struct wmi_init_cmd_10_2 *)buf->data;
  22472. ++
  22473. ++ features = WMI_10_2_RX_BATCH_MODE;
  22474. ++ cmd->resource_config.feature_mask = __cpu_to_le32(features);
  22475. ++
  22476. ++ memcpy(&cmd->resource_config.common, &config, sizeof(config));
  22477. ++ ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
  22478. ++
  22479. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n");
  22480. ++ return buf;
  22481. + }
  22482. +
  22483. +-int ath10k_wmi_cmd_init(struct ath10k *ar)
  22484. ++int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg)
  22485. + {
  22486. +- int ret;
  22487. ++ if (arg->ie_len && !arg->ie)
  22488. ++ return -EINVAL;
  22489. ++ if (arg->n_channels && !arg->channels)
  22490. ++ return -EINVAL;
  22491. ++ if (arg->n_ssids && !arg->ssids)
  22492. ++ return -EINVAL;
  22493. ++ if (arg->n_bssids && !arg->bssids)
  22494. ++ return -EINVAL;
  22495. +
  22496. +- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
  22497. +- ret = ath10k_wmi_10x_cmd_init(ar);
  22498. +- else
  22499. +- ret = ath10k_wmi_main_cmd_init(ar);
  22500. ++ if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
  22501. ++ return -EINVAL;
  22502. ++ if (arg->n_channels > ARRAY_SIZE(arg->channels))
  22503. ++ return -EINVAL;
  22504. ++ if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
  22505. ++ return -EINVAL;
  22506. ++ if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
  22507. ++ return -EINVAL;
  22508. +
  22509. +- return ret;
  22510. ++ return 0;
  22511. + }
  22512. +
  22513. +-static int ath10k_wmi_start_scan_calc_len(struct ath10k *ar,
  22514. +- const struct wmi_start_scan_arg *arg)
  22515. ++static size_t
  22516. ++ath10k_wmi_start_scan_tlvs_len(const struct wmi_start_scan_arg *arg)
  22517. + {
  22518. +- int len;
  22519. +-
  22520. +- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
  22521. +- len = sizeof(struct wmi_start_scan_cmd_10x);
  22522. +- else
  22523. +- len = sizeof(struct wmi_start_scan_cmd);
  22524. ++ int len = 0;
  22525. +
  22526. + if (arg->ie_len) {
  22527. +- if (!arg->ie)
  22528. +- return -EINVAL;
  22529. +- if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
  22530. +- return -EINVAL;
  22531. +-
  22532. + len += sizeof(struct wmi_ie_data);
  22533. + len += roundup(arg->ie_len, 4);
  22534. + }
  22535. +
  22536. + if (arg->n_channels) {
  22537. +- if (!arg->channels)
  22538. +- return -EINVAL;
  22539. +- if (arg->n_channels > ARRAY_SIZE(arg->channels))
  22540. +- return -EINVAL;
  22541. +-
  22542. + len += sizeof(struct wmi_chan_list);
  22543. + len += sizeof(__le32) * arg->n_channels;
  22544. + }
  22545. +
  22546. + if (arg->n_ssids) {
  22547. +- if (!arg->ssids)
  22548. +- return -EINVAL;
  22549. +- if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
  22550. +- return -EINVAL;
  22551. +-
  22552. + len += sizeof(struct wmi_ssid_list);
  22553. + len += sizeof(struct wmi_ssid) * arg->n_ssids;
  22554. + }
  22555. +
  22556. + if (arg->n_bssids) {
  22557. +- if (!arg->bssids)
  22558. +- return -EINVAL;
  22559. +- if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
  22560. +- return -EINVAL;
  22561. +-
  22562. + len += sizeof(struct wmi_bssid_list);
  22563. + len += sizeof(struct wmi_mac_addr) * arg->n_bssids;
  22564. + }
  22565. +@@ -2793,28 +4049,11 @@ static int ath10k_wmi_start_scan_calc_le
  22566. + return len;
  22567. + }
  22568. +
  22569. +-int ath10k_wmi_start_scan(struct ath10k *ar,
  22570. +- const struct wmi_start_scan_arg *arg)
  22571. ++void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
  22572. ++ const struct wmi_start_scan_arg *arg)
  22573. + {
  22574. +- struct wmi_start_scan_cmd *cmd;
  22575. +- struct sk_buff *skb;
  22576. +- struct wmi_ie_data *ie;
  22577. +- struct wmi_chan_list *channels;
  22578. +- struct wmi_ssid_list *ssids;
  22579. +- struct wmi_bssid_list *bssids;
  22580. + u32 scan_id;
  22581. + u32 scan_req_id;
  22582. +- int off;
  22583. +- int len = 0;
  22584. +- int i;
  22585. +-
  22586. +- len = ath10k_wmi_start_scan_calc_len(ar, arg);
  22587. +- if (len < 0)
  22588. +- return len; /* len contains error code here */
  22589. +-
  22590. +- skb = ath10k_wmi_alloc_skb(len);
  22591. +- if (!skb)
  22592. +- return -ENOMEM;
  22593. +
  22594. + scan_id = WMI_HOST_SCAN_REQ_ID_PREFIX;
  22595. + scan_id |= arg->scan_id;
  22596. +@@ -2822,48 +4061,49 @@ int ath10k_wmi_start_scan(struct ath10k
  22597. + scan_req_id = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
  22598. + scan_req_id |= arg->scan_req_id;
  22599. +
  22600. +- cmd = (struct wmi_start_scan_cmd *)skb->data;
  22601. +- cmd->scan_id = __cpu_to_le32(scan_id);
  22602. +- cmd->scan_req_id = __cpu_to_le32(scan_req_id);
  22603. +- cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
  22604. +- cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
  22605. +- cmd->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
  22606. +- cmd->dwell_time_active = __cpu_to_le32(arg->dwell_time_active);
  22607. +- cmd->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
  22608. +- cmd->min_rest_time = __cpu_to_le32(arg->min_rest_time);
  22609. +- cmd->max_rest_time = __cpu_to_le32(arg->max_rest_time);
  22610. +- cmd->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time);
  22611. +- cmd->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
  22612. +- cmd->idle_time = __cpu_to_le32(arg->idle_time);
  22613. +- cmd->max_scan_time = __cpu_to_le32(arg->max_scan_time);
  22614. +- cmd->probe_delay = __cpu_to_le32(arg->probe_delay);
  22615. +- cmd->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags);
  22616. +-
  22617. +- /* TLV list starts after fields included in the struct */
  22618. +- /* There's just one filed that differes the two start_scan
  22619. +- * structures - burst_duration, which we are not using btw,
  22620. +- no point to make the split here, just shift the buffer to fit with
  22621. +- given FW */
  22622. +- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
  22623. +- off = sizeof(struct wmi_start_scan_cmd_10x);
  22624. +- else
  22625. +- off = sizeof(struct wmi_start_scan_cmd);
  22626. ++ cmn->scan_id = __cpu_to_le32(scan_id);
  22627. ++ cmn->scan_req_id = __cpu_to_le32(scan_req_id);
  22628. ++ cmn->vdev_id = __cpu_to_le32(arg->vdev_id);
  22629. ++ cmn->scan_priority = __cpu_to_le32(arg->scan_priority);
  22630. ++ cmn->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
  22631. ++ cmn->dwell_time_active = __cpu_to_le32(arg->dwell_time_active);
  22632. ++ cmn->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
  22633. ++ cmn->min_rest_time = __cpu_to_le32(arg->min_rest_time);
  22634. ++ cmn->max_rest_time = __cpu_to_le32(arg->max_rest_time);
  22635. ++ cmn->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time);
  22636. ++ cmn->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
  22637. ++ cmn->idle_time = __cpu_to_le32(arg->idle_time);
  22638. ++ cmn->max_scan_time = __cpu_to_le32(arg->max_scan_time);
  22639. ++ cmn->probe_delay = __cpu_to_le32(arg->probe_delay);
  22640. ++ cmn->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags);
  22641. ++}
  22642. ++
  22643. ++static void
  22644. ++ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs *tlvs,
  22645. ++ const struct wmi_start_scan_arg *arg)
  22646. ++{
  22647. ++ struct wmi_ie_data *ie;
  22648. ++ struct wmi_chan_list *channels;
  22649. ++ struct wmi_ssid_list *ssids;
  22650. ++ struct wmi_bssid_list *bssids;
  22651. ++ void *ptr = tlvs->tlvs;
  22652. ++ int i;
  22653. +
  22654. + if (arg->n_channels) {
  22655. +- channels = (void *)skb->data + off;
  22656. ++ channels = ptr;
  22657. + channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG);
  22658. + channels->num_chan = __cpu_to_le32(arg->n_channels);
  22659. +
  22660. + for (i = 0; i < arg->n_channels; i++)
  22661. +- channels->channel_list[i] =
  22662. +- __cpu_to_le32(arg->channels[i]);
  22663. ++ channels->channel_list[i].freq =
  22664. ++ __cpu_to_le16(arg->channels[i]);
  22665. +
  22666. +- off += sizeof(*channels);
  22667. +- off += sizeof(__le32) * arg->n_channels;
  22668. ++ ptr += sizeof(*channels);
  22669. ++ ptr += sizeof(__le32) * arg->n_channels;
  22670. + }
  22671. +
  22672. + if (arg->n_ssids) {
  22673. +- ssids = (void *)skb->data + off;
  22674. ++ ssids = ptr;
  22675. + ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG);
  22676. + ssids->num_ssids = __cpu_to_le32(arg->n_ssids);
  22677. +
  22678. +@@ -2875,12 +4115,12 @@ int ath10k_wmi_start_scan(struct ath10k
  22679. + arg->ssids[i].len);
  22680. + }
  22681. +
  22682. +- off += sizeof(*ssids);
  22683. +- off += sizeof(struct wmi_ssid) * arg->n_ssids;
  22684. ++ ptr += sizeof(*ssids);
  22685. ++ ptr += sizeof(struct wmi_ssid) * arg->n_ssids;
  22686. + }
  22687. +
  22688. + if (arg->n_bssids) {
  22689. +- bssids = (void *)skb->data + off;
  22690. ++ bssids = ptr;
  22691. + bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG);
  22692. + bssids->num_bssid = __cpu_to_le32(arg->n_bssids);
  22693. +
  22694. +@@ -2889,27 +4129,75 @@ int ath10k_wmi_start_scan(struct ath10k
  22695. + arg->bssids[i].bssid,
  22696. + ETH_ALEN);
  22697. +
  22698. +- off += sizeof(*bssids);
  22699. +- off += sizeof(struct wmi_mac_addr) * arg->n_bssids;
  22700. ++ ptr += sizeof(*bssids);
  22701. ++ ptr += sizeof(struct wmi_mac_addr) * arg->n_bssids;
  22702. + }
  22703. +
  22704. + if (arg->ie_len) {
  22705. +- ie = (void *)skb->data + off;
  22706. ++ ie = ptr;
  22707. + ie->tag = __cpu_to_le32(WMI_IE_TAG);
  22708. + ie->ie_len = __cpu_to_le32(arg->ie_len);
  22709. + memcpy(ie->ie_data, arg->ie, arg->ie_len);
  22710. +
  22711. +- off += sizeof(*ie);
  22712. +- off += roundup(arg->ie_len, 4);
  22713. ++ ptr += sizeof(*ie);
  22714. ++ ptr += roundup(arg->ie_len, 4);
  22715. + }
  22716. ++}
  22717. +
  22718. +- if (off != skb->len) {
  22719. +- dev_kfree_skb(skb);
  22720. +- return -EINVAL;
  22721. +- }
  22722. ++static struct sk_buff *
  22723. ++ath10k_wmi_op_gen_start_scan(struct ath10k *ar,
  22724. ++ const struct wmi_start_scan_arg *arg)
  22725. ++{
  22726. ++ struct wmi_start_scan_cmd *cmd;
  22727. ++ struct sk_buff *skb;
  22728. ++ size_t len;
  22729. ++ int ret;
  22730. ++
  22731. ++ ret = ath10k_wmi_start_scan_verify(arg);
  22732. ++ if (ret)
  22733. ++ return ERR_PTR(ret);
  22734. +
  22735. +- ath10k_dbg(ATH10K_DBG_WMI, "wmi start scan\n");
  22736. +- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
  22737. ++ len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg);
  22738. ++ skb = ath10k_wmi_alloc_skb(ar, len);
  22739. ++ if (!skb)
  22740. ++ return ERR_PTR(-ENOMEM);
  22741. ++
  22742. ++ cmd = (struct wmi_start_scan_cmd *)skb->data;
  22743. ++
  22744. ++ ath10k_wmi_put_start_scan_common(&cmd->common, arg);
  22745. ++ ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
  22746. ++
  22747. ++ cmd->burst_duration_ms = __cpu_to_le32(0);
  22748. ++
  22749. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n");
  22750. ++ return skb;
  22751. ++}
  22752. ++
  22753. ++static struct sk_buff *
  22754. ++ath10k_wmi_10x_op_gen_start_scan(struct ath10k *ar,
  22755. ++ const struct wmi_start_scan_arg *arg)
  22756. ++{
  22757. ++ struct wmi_10x_start_scan_cmd *cmd;
  22758. ++ struct sk_buff *skb;
  22759. ++ size_t len;
  22760. ++ int ret;
  22761. ++
  22762. ++ ret = ath10k_wmi_start_scan_verify(arg);
  22763. ++ if (ret)
  22764. ++ return ERR_PTR(ret);
  22765. ++
  22766. ++ len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg);
  22767. ++ skb = ath10k_wmi_alloc_skb(ar, len);
  22768. ++ if (!skb)
  22769. ++ return ERR_PTR(-ENOMEM);
  22770. ++
  22771. ++ cmd = (struct wmi_10x_start_scan_cmd *)skb->data;
  22772. ++
  22773. ++ ath10k_wmi_put_start_scan_common(&cmd->common, arg);
  22774. ++ ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
  22775. ++
  22776. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi 10x start scan\n");
  22777. ++ return skb;
  22778. + }
  22779. +
  22780. + void ath10k_wmi_start_scan_init(struct ath10k *ar,
  22781. +@@ -2938,7 +4226,9 @@ void ath10k_wmi_start_scan_init(struct a
  22782. + arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
  22783. + }
  22784. +
  22785. +-int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
  22786. ++static struct sk_buff *
  22787. ++ath10k_wmi_op_gen_stop_scan(struct ath10k *ar,
  22788. ++ const struct wmi_stop_scan_arg *arg)
  22789. + {
  22790. + struct wmi_stop_scan_cmd *cmd;
  22791. + struct sk_buff *skb;
  22792. +@@ -2946,13 +4236,13 @@ int ath10k_wmi_stop_scan(struct ath10k *
  22793. + u32 req_id;
  22794. +
  22795. + if (arg->req_id > 0xFFF)
  22796. +- return -EINVAL;
  22797. ++ return ERR_PTR(-EINVAL);
  22798. + if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
  22799. +- return -EINVAL;
  22800. ++ return ERR_PTR(-EINVAL);
  22801. +
  22802. +- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  22803. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  22804. + if (!skb)
  22805. +- return -ENOMEM;
  22806. ++ return ERR_PTR(-ENOMEM);
  22807. +
  22808. + scan_id = arg->u.scan_id;
  22809. + scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
  22810. +@@ -2966,92 +4256,85 @@ int ath10k_wmi_stop_scan(struct ath10k *
  22811. + cmd->scan_id = __cpu_to_le32(scan_id);
  22812. + cmd->scan_req_id = __cpu_to_le32(req_id);
  22813. +
  22814. +- ath10k_dbg(ATH10K_DBG_WMI,
  22815. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  22816. + "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
  22817. + arg->req_id, arg->req_type, arg->u.scan_id);
  22818. +- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
  22819. ++ return skb;
  22820. + }
  22821. +
  22822. +-int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
  22823. +- enum wmi_vdev_type type,
  22824. +- enum wmi_vdev_subtype subtype,
  22825. +- const u8 macaddr[ETH_ALEN])
  22826. ++static struct sk_buff *
  22827. ++ath10k_wmi_op_gen_vdev_create(struct ath10k *ar, u32 vdev_id,
  22828. ++ enum wmi_vdev_type type,
  22829. ++ enum wmi_vdev_subtype subtype,
  22830. ++ const u8 macaddr[ETH_ALEN])
  22831. + {
  22832. + struct wmi_vdev_create_cmd *cmd;
  22833. + struct sk_buff *skb;
  22834. +
  22835. +- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  22836. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  22837. + if (!skb)
  22838. +- return -ENOMEM;
  22839. ++ return ERR_PTR(-ENOMEM);
  22840. +
  22841. + cmd = (struct wmi_vdev_create_cmd *)skb->data;
  22842. + cmd->vdev_id = __cpu_to_le32(vdev_id);
  22843. + cmd->vdev_type = __cpu_to_le32(type);
  22844. + cmd->vdev_subtype = __cpu_to_le32(subtype);
  22845. +- memcpy(cmd->vdev_macaddr.addr, macaddr, ETH_ALEN);
  22846. ++ ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
  22847. +
  22848. +- ath10k_dbg(ATH10K_DBG_WMI,
  22849. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  22850. + "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
  22851. + vdev_id, type, subtype, macaddr);
  22852. +-
  22853. +- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
  22854. ++ return skb;
  22855. + }
  22856. +
  22857. +-int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
  22858. ++static struct sk_buff *
  22859. ++ath10k_wmi_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
  22860. + {
  22861. + struct wmi_vdev_delete_cmd *cmd;
  22862. + struct sk_buff *skb;
  22863. +
  22864. +- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  22865. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  22866. + if (!skb)
  22867. +- return -ENOMEM;
  22868. ++ return ERR_PTR(-ENOMEM);
  22869. +
  22870. + cmd = (struct wmi_vdev_delete_cmd *)skb->data;
  22871. + cmd->vdev_id = __cpu_to_le32(vdev_id);
  22872. +
  22873. +- ath10k_dbg(ATH10K_DBG_WMI,
  22874. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  22875. + "WMI vdev delete id %d\n", vdev_id);
  22876. +-
  22877. +- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
  22878. ++ return skb;
  22879. + }
  22880. +
  22881. +-static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
  22882. +- const struct wmi_vdev_start_request_arg *arg,
  22883. +- u32 cmd_id)
  22884. ++static struct sk_buff *
  22885. ++ath10k_wmi_op_gen_vdev_start(struct ath10k *ar,
  22886. ++ const struct wmi_vdev_start_request_arg *arg,
  22887. ++ bool restart)
  22888. + {
  22889. + struct wmi_vdev_start_request_cmd *cmd;
  22890. + struct sk_buff *skb;
  22891. + const char *cmdname;
  22892. + u32 flags = 0;
  22893. +- u32 ch_flags = 0;
  22894. +
  22895. +- if (cmd_id != ar->wmi.cmd->vdev_start_request_cmdid &&
  22896. +- cmd_id != ar->wmi.cmd->vdev_restart_request_cmdid)
  22897. +- return -EINVAL;
  22898. + if (WARN_ON(arg->ssid && arg->ssid_len == 0))
  22899. +- return -EINVAL;
  22900. ++ return ERR_PTR(-EINVAL);
  22901. + if (WARN_ON(arg->hidden_ssid && !arg->ssid))
  22902. +- return -EINVAL;
  22903. ++ return ERR_PTR(-EINVAL);
  22904. + if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
  22905. +- return -EINVAL;
  22906. ++ return ERR_PTR(-EINVAL);
  22907. +
  22908. +- if (cmd_id == ar->wmi.cmd->vdev_start_request_cmdid)
  22909. +- cmdname = "start";
  22910. +- else if (cmd_id == ar->wmi.cmd->vdev_restart_request_cmdid)
  22911. ++ if (restart)
  22912. + cmdname = "restart";
  22913. + else
  22914. +- return -EINVAL; /* should not happen, we already check cmd_id */
  22915. ++ cmdname = "start";
  22916. +
  22917. +- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  22918. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  22919. + if (!skb)
  22920. +- return -ENOMEM;
  22921. ++ return ERR_PTR(-ENOMEM);
  22922. +
  22923. + if (arg->hidden_ssid)
  22924. + flags |= WMI_VDEV_START_HIDDEN_SSID;
  22925. + if (arg->pmf_enabled)
  22926. + flags |= WMI_VDEV_START_PMF_ENABLED;
  22927. +- if (arg->channel.chan_radar)
  22928. +- ch_flags |= WMI_CHAN_FLAG_DFS;
  22929. +
  22930. + cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
  22931. + cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
  22932. +@@ -3067,143 +4350,118 @@ static int ath10k_wmi_vdev_start_restart
  22933. + memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
  22934. + }
  22935. +
  22936. +- cmd->chan.mhz = __cpu_to_le32(arg->channel.freq);
  22937. ++ ath10k_wmi_put_wmi_channel(&cmd->chan, &arg->channel);
  22938. +
  22939. +- cmd->chan.band_center_freq1 =
  22940. +- __cpu_to_le32(arg->channel.band_center_freq1);
  22941. +-
  22942. +- cmd->chan.mode = arg->channel.mode;
  22943. +- cmd->chan.flags |= __cpu_to_le32(ch_flags);
  22944. +- cmd->chan.min_power = arg->channel.min_power;
  22945. +- cmd->chan.max_power = arg->channel.max_power;
  22946. +- cmd->chan.reg_power = arg->channel.max_reg_power;
  22947. +- cmd->chan.reg_classid = arg->channel.reg_class_id;
  22948. +- cmd->chan.antenna_max = arg->channel.max_antenna_gain;
  22949. +-
  22950. +- ath10k_dbg(ATH10K_DBG_WMI,
  22951. +- "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, "
  22952. +- "ch_flags: 0x%0X, max_power: %d\n", cmdname, arg->vdev_id,
  22953. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  22954. ++ "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, ch_flags: 0x%0X, max_power: %d\n",
  22955. ++ cmdname, arg->vdev_id,
  22956. + flags, arg->channel.freq, arg->channel.mode,
  22957. + cmd->chan.flags, arg->channel.max_power);
  22958. +
  22959. +- return ath10k_wmi_cmd_send(ar, skb, cmd_id);
  22960. +-}
  22961. +-
  22962. +-int ath10k_wmi_vdev_start(struct ath10k *ar,
  22963. +- const struct wmi_vdev_start_request_arg *arg)
  22964. +-{
  22965. +- u32 cmd_id = ar->wmi.cmd->vdev_start_request_cmdid;
  22966. +-
  22967. +- return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id);
  22968. +-}
  22969. +-
  22970. +-int ath10k_wmi_vdev_restart(struct ath10k *ar,
  22971. +- const struct wmi_vdev_start_request_arg *arg)
  22972. +-{
  22973. +- u32 cmd_id = ar->wmi.cmd->vdev_restart_request_cmdid;
  22974. +-
  22975. +- return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id);
  22976. ++ return skb;
  22977. + }
  22978. +
  22979. +-int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
  22980. ++static struct sk_buff *
  22981. ++ath10k_wmi_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
  22982. + {
  22983. + struct wmi_vdev_stop_cmd *cmd;
  22984. + struct sk_buff *skb;
  22985. +
  22986. +- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  22987. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  22988. + if (!skb)
  22989. +- return -ENOMEM;
  22990. ++ return ERR_PTR(-ENOMEM);
  22991. +
  22992. + cmd = (struct wmi_vdev_stop_cmd *)skb->data;
  22993. + cmd->vdev_id = __cpu_to_le32(vdev_id);
  22994. +
  22995. +- ath10k_dbg(ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
  22996. +-
  22997. +- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
  22998. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
  22999. ++ return skb;
  23000. + }
  23001. +
  23002. +-int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
  23003. ++static struct sk_buff *
  23004. ++ath10k_wmi_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
  23005. ++ const u8 *bssid)
  23006. + {
  23007. + struct wmi_vdev_up_cmd *cmd;
  23008. + struct sk_buff *skb;
  23009. +
  23010. +- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  23011. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  23012. + if (!skb)
  23013. +- return -ENOMEM;
  23014. ++ return ERR_PTR(-ENOMEM);
  23015. +
  23016. + cmd = (struct wmi_vdev_up_cmd *)skb->data;
  23017. + cmd->vdev_id = __cpu_to_le32(vdev_id);
  23018. + cmd->vdev_assoc_id = __cpu_to_le32(aid);
  23019. +- memcpy(&cmd->vdev_bssid.addr, bssid, ETH_ALEN);
  23020. ++ ether_addr_copy(cmd->vdev_bssid.addr, bssid);
  23021. +
  23022. +- ath10k_dbg(ATH10K_DBG_WMI,
  23023. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  23024. + "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
  23025. + vdev_id, aid, bssid);
  23026. +-
  23027. +- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
  23028. ++ return skb;
  23029. + }
  23030. +
  23031. +-int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
  23032. ++static struct sk_buff *
  23033. ++ath10k_wmi_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
  23034. + {
  23035. + struct wmi_vdev_down_cmd *cmd;
  23036. + struct sk_buff *skb;
  23037. +
  23038. +- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  23039. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  23040. + if (!skb)
  23041. +- return -ENOMEM;
  23042. ++ return ERR_PTR(-ENOMEM);
  23043. +
  23044. + cmd = (struct wmi_vdev_down_cmd *)skb->data;
  23045. + cmd->vdev_id = __cpu_to_le32(vdev_id);
  23046. +
  23047. +- ath10k_dbg(ATH10K_DBG_WMI,
  23048. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  23049. + "wmi mgmt vdev down id 0x%x\n", vdev_id);
  23050. +-
  23051. +- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
  23052. ++ return skb;
  23053. + }
  23054. +
  23055. +-int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
  23056. +- u32 param_id, u32 param_value)
  23057. ++static struct sk_buff *
  23058. ++ath10k_wmi_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
  23059. ++ u32 param_id, u32 param_value)
  23060. + {
  23061. + struct wmi_vdev_set_param_cmd *cmd;
  23062. + struct sk_buff *skb;
  23063. +
  23064. + if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) {
  23065. +- ath10k_dbg(ATH10K_DBG_WMI,
  23066. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  23067. + "vdev param %d not supported by firmware\n",
  23068. + param_id);
  23069. +- return -EOPNOTSUPP;
  23070. ++ return ERR_PTR(-EOPNOTSUPP);
  23071. + }
  23072. +
  23073. +- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  23074. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  23075. + if (!skb)
  23076. +- return -ENOMEM;
  23077. ++ return ERR_PTR(-ENOMEM);
  23078. +
  23079. + cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
  23080. + cmd->vdev_id = __cpu_to_le32(vdev_id);
  23081. + cmd->param_id = __cpu_to_le32(param_id);
  23082. + cmd->param_value = __cpu_to_le32(param_value);
  23083. +
  23084. +- ath10k_dbg(ATH10K_DBG_WMI,
  23085. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  23086. + "wmi vdev id 0x%x set param %d value %d\n",
  23087. + vdev_id, param_id, param_value);
  23088. +-
  23089. +- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
  23090. ++ return skb;
  23091. + }
  23092. +
  23093. +-int ath10k_wmi_vdev_install_key(struct ath10k *ar,
  23094. +- const struct wmi_vdev_install_key_arg *arg)
  23095. ++static struct sk_buff *
  23096. ++ath10k_wmi_op_gen_vdev_install_key(struct ath10k *ar,
  23097. ++ const struct wmi_vdev_install_key_arg *arg)
  23098. + {
  23099. + struct wmi_vdev_install_key_cmd *cmd;
  23100. + struct sk_buff *skb;
  23101. +
  23102. + if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
  23103. +- return -EINVAL;
  23104. ++ return ERR_PTR(-EINVAL);
  23105. + if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
  23106. +- return -EINVAL;
  23107. ++ return ERR_PTR(-EINVAL);
  23108. +
  23109. +- skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->key_len);
  23110. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + arg->key_len);
  23111. + if (!skb)
  23112. +- return -ENOMEM;
  23113. ++ return ERR_PTR(-ENOMEM);
  23114. +
  23115. + cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
  23116. + cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
  23117. +@@ -3215,176 +4473,232 @@ int ath10k_wmi_vdev_install_key(struct a
  23118. + cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);
  23119. +
  23120. + if (arg->macaddr)
  23121. +- memcpy(cmd->peer_macaddr.addr, arg->macaddr, ETH_ALEN);
  23122. ++ ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
  23123. + if (arg->key_data)
  23124. + memcpy(cmd->key_data, arg->key_data, arg->key_len);
  23125. +
  23126. +- ath10k_dbg(ATH10K_DBG_WMI,
  23127. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  23128. + "wmi vdev install key idx %d cipher %d len %d\n",
  23129. + arg->key_idx, arg->key_cipher, arg->key_len);
  23130. +- return ath10k_wmi_cmd_send(ar, skb,
  23131. +- ar->wmi.cmd->vdev_install_key_cmdid);
  23132. ++ return skb;
  23133. + }
  23134. +
  23135. +-int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
  23136. +- const u8 peer_addr[ETH_ALEN])
  23137. ++static struct sk_buff *
  23138. ++ath10k_wmi_op_gen_vdev_spectral_conf(struct ath10k *ar,
  23139. ++ const struct wmi_vdev_spectral_conf_arg *arg)
  23140. ++{
  23141. ++ struct wmi_vdev_spectral_conf_cmd *cmd;
  23142. ++ struct sk_buff *skb;
  23143. ++
  23144. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  23145. ++ if (!skb)
  23146. ++ return ERR_PTR(-ENOMEM);
  23147. ++
  23148. ++ cmd = (struct wmi_vdev_spectral_conf_cmd *)skb->data;
  23149. ++ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
  23150. ++ cmd->scan_count = __cpu_to_le32(arg->scan_count);
  23151. ++ cmd->scan_period = __cpu_to_le32(arg->scan_period);
  23152. ++ cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
  23153. ++ cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size);
  23154. ++ cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena);
  23155. ++ cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena);
  23156. ++ cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref);
  23157. ++ cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay);
  23158. ++ cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr);
  23159. ++ cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr);
  23160. ++ cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode);
  23161. ++ cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode);
  23162. ++ cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr);
  23163. ++ cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format);
  23164. ++ cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode);
  23165. ++ cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale);
  23166. ++ cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
  23167. ++ cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);
  23168. ++
  23169. ++ return skb;
  23170. ++}
  23171. ++
  23172. ++static struct sk_buff *
  23173. ++ath10k_wmi_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
  23174. ++ u32 trigger, u32 enable)
  23175. ++{
  23176. ++ struct wmi_vdev_spectral_enable_cmd *cmd;
  23177. ++ struct sk_buff *skb;
  23178. ++
  23179. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  23180. ++ if (!skb)
  23181. ++ return ERR_PTR(-ENOMEM);
  23182. ++
  23183. ++ cmd = (struct wmi_vdev_spectral_enable_cmd *)skb->data;
  23184. ++ cmd->vdev_id = __cpu_to_le32(vdev_id);
  23185. ++ cmd->trigger_cmd = __cpu_to_le32(trigger);
  23186. ++ cmd->enable_cmd = __cpu_to_le32(enable);
  23187. ++
  23188. ++ return skb;
  23189. ++}
  23190. ++
  23191. ++static struct sk_buff *
  23192. ++ath10k_wmi_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
  23193. ++ const u8 peer_addr[ETH_ALEN])
  23194. + {
  23195. + struct wmi_peer_create_cmd *cmd;
  23196. + struct sk_buff *skb;
  23197. +
  23198. +- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  23199. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  23200. + if (!skb)
  23201. +- return -ENOMEM;
  23202. ++ return ERR_PTR(-ENOMEM);
  23203. +
  23204. + cmd = (struct wmi_peer_create_cmd *)skb->data;
  23205. + cmd->vdev_id = __cpu_to_le32(vdev_id);
  23206. +- memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
  23207. ++ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
  23208. +
  23209. +- ath10k_dbg(ATH10K_DBG_WMI,
  23210. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  23211. + "wmi peer create vdev_id %d peer_addr %pM\n",
  23212. + vdev_id, peer_addr);
  23213. +- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
  23214. ++ return skb;
  23215. + }
  23216. +
  23217. +-int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
  23218. +- const u8 peer_addr[ETH_ALEN])
  23219. ++static struct sk_buff *
  23220. ++ath10k_wmi_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
  23221. ++ const u8 peer_addr[ETH_ALEN])
  23222. + {
  23223. + struct wmi_peer_delete_cmd *cmd;
  23224. + struct sk_buff *skb;
  23225. +
  23226. +- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  23227. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  23228. + if (!skb)
  23229. +- return -ENOMEM;
  23230. ++ return ERR_PTR(-ENOMEM);
  23231. +
  23232. + cmd = (struct wmi_peer_delete_cmd *)skb->data;
  23233. + cmd->vdev_id = __cpu_to_le32(vdev_id);
  23234. +- memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
  23235. ++ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
  23236. +
  23237. +- ath10k_dbg(ATH10K_DBG_WMI,
  23238. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  23239. + "wmi peer delete vdev_id %d peer_addr %pM\n",
  23240. + vdev_id, peer_addr);
  23241. +- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
  23242. ++ return skb;
  23243. + }
  23244. +
  23245. +-int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
  23246. +- const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
  23247. ++static struct sk_buff *
  23248. ++ath10k_wmi_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
  23249. ++ const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
  23250. + {
  23251. + struct wmi_peer_flush_tids_cmd *cmd;
  23252. + struct sk_buff *skb;
  23253. +
  23254. +- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  23255. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  23256. + if (!skb)
  23257. +- return -ENOMEM;
  23258. ++ return ERR_PTR(-ENOMEM);
  23259. +
  23260. + cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
  23261. + cmd->vdev_id = __cpu_to_le32(vdev_id);
  23262. + cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
  23263. +- memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
  23264. ++ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
  23265. +
  23266. +- ath10k_dbg(ATH10K_DBG_WMI,
  23267. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  23268. + "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
  23269. + vdev_id, peer_addr, tid_bitmap);
  23270. +- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
  23271. ++ return skb;
  23272. + }
  23273. +
  23274. +-int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
  23275. +- const u8 *peer_addr, enum wmi_peer_param param_id,
  23276. +- u32 param_value)
  23277. ++static struct sk_buff *
  23278. ++ath10k_wmi_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
  23279. ++ const u8 *peer_addr,
  23280. ++ enum wmi_peer_param param_id,
  23281. ++ u32 param_value)
  23282. + {
  23283. + struct wmi_peer_set_param_cmd *cmd;
  23284. + struct sk_buff *skb;
  23285. +
  23286. +- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  23287. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  23288. + if (!skb)
  23289. +- return -ENOMEM;
  23290. ++ return ERR_PTR(-ENOMEM);
  23291. +
  23292. + cmd = (struct wmi_peer_set_param_cmd *)skb->data;
  23293. + cmd->vdev_id = __cpu_to_le32(vdev_id);
  23294. + cmd->param_id = __cpu_to_le32(param_id);
  23295. + cmd->param_value = __cpu_to_le32(param_value);
  23296. +- memcpy(&cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
  23297. ++ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
  23298. +
  23299. +- ath10k_dbg(ATH10K_DBG_WMI,
  23300. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  23301. + "wmi vdev %d peer 0x%pM set param %d value %d\n",
  23302. + vdev_id, peer_addr, param_id, param_value);
  23303. +-
  23304. +- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
  23305. ++ return skb;
  23306. + }
  23307. +
  23308. +-int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
  23309. +- enum wmi_sta_ps_mode psmode)
  23310. ++static struct sk_buff *
  23311. ++ath10k_wmi_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
  23312. ++ enum wmi_sta_ps_mode psmode)
  23313. + {
  23314. + struct wmi_sta_powersave_mode_cmd *cmd;
  23315. + struct sk_buff *skb;
  23316. +
  23317. +- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  23318. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  23319. + if (!skb)
  23320. +- return -ENOMEM;
  23321. ++ return ERR_PTR(-ENOMEM);
  23322. +
  23323. + cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data;
  23324. + cmd->vdev_id = __cpu_to_le32(vdev_id);
  23325. + cmd->sta_ps_mode = __cpu_to_le32(psmode);
  23326. +
  23327. +- ath10k_dbg(ATH10K_DBG_WMI,
  23328. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  23329. + "wmi set powersave id 0x%x mode %d\n",
  23330. + vdev_id, psmode);
  23331. +-
  23332. +- return ath10k_wmi_cmd_send(ar, skb,
  23333. +- ar->wmi.cmd->sta_powersave_mode_cmdid);
  23334. ++ return skb;
  23335. + }
  23336. +
  23337. +-int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
  23338. +- enum wmi_sta_powersave_param param_id,
  23339. +- u32 value)
  23340. ++static struct sk_buff *
  23341. ++ath10k_wmi_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
  23342. ++ enum wmi_sta_powersave_param param_id,
  23343. ++ u32 value)
  23344. + {
  23345. + struct wmi_sta_powersave_param_cmd *cmd;
  23346. + struct sk_buff *skb;
  23347. +
  23348. +- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  23349. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  23350. + if (!skb)
  23351. +- return -ENOMEM;
  23352. ++ return ERR_PTR(-ENOMEM);
  23353. +
  23354. + cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
  23355. + cmd->vdev_id = __cpu_to_le32(vdev_id);
  23356. + cmd->param_id = __cpu_to_le32(param_id);
  23357. + cmd->param_value = __cpu_to_le32(value);
  23358. +
  23359. +- ath10k_dbg(ATH10K_DBG_WMI,
  23360. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  23361. + "wmi sta ps param vdev_id 0x%x param %d value %d\n",
  23362. + vdev_id, param_id, value);
  23363. +- return ath10k_wmi_cmd_send(ar, skb,
  23364. +- ar->wmi.cmd->sta_powersave_param_cmdid);
  23365. ++ return skb;
  23366. + }
  23367. +
  23368. +-int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
  23369. +- enum wmi_ap_ps_peer_param param_id, u32 value)
  23370. ++static struct sk_buff *
  23371. ++ath10k_wmi_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
  23372. ++ enum wmi_ap_ps_peer_param param_id, u32 value)
  23373. + {
  23374. + struct wmi_ap_ps_peer_cmd *cmd;
  23375. + struct sk_buff *skb;
  23376. +
  23377. + if (!mac)
  23378. +- return -EINVAL;
  23379. ++ return ERR_PTR(-EINVAL);
  23380. +
  23381. +- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  23382. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  23383. + if (!skb)
  23384. +- return -ENOMEM;
  23385. ++ return ERR_PTR(-ENOMEM);
  23386. +
  23387. + cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
  23388. + cmd->vdev_id = __cpu_to_le32(vdev_id);
  23389. + cmd->param_id = __cpu_to_le32(param_id);
  23390. + cmd->param_value = __cpu_to_le32(value);
  23391. +- memcpy(&cmd->peer_macaddr, mac, ETH_ALEN);
  23392. ++ ether_addr_copy(cmd->peer_macaddr.addr, mac);
  23393. +
  23394. +- ath10k_dbg(ATH10K_DBG_WMI,
  23395. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  23396. + "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
  23397. + vdev_id, param_id, value, mac);
  23398. +-
  23399. +- return ath10k_wmi_cmd_send(ar, skb,
  23400. +- ar->wmi.cmd->ap_ps_peer_param_cmdid);
  23401. ++ return skb;
  23402. + }
  23403. +
  23404. +-int ath10k_wmi_scan_chan_list(struct ath10k *ar,
  23405. +- const struct wmi_scan_chan_list_arg *arg)
  23406. ++static struct sk_buff *
  23407. ++ath10k_wmi_op_gen_scan_chan_list(struct ath10k *ar,
  23408. ++ const struct wmi_scan_chan_list_arg *arg)
  23409. + {
  23410. + struct wmi_scan_chan_list_cmd *cmd;
  23411. + struct sk_buff *skb;
  23412. +@@ -3395,66 +4709,29 @@ int ath10k_wmi_scan_chan_list(struct ath
  23413. +
  23414. + len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel);
  23415. +
  23416. +- skb = ath10k_wmi_alloc_skb(len);
  23417. ++ skb = ath10k_wmi_alloc_skb(ar, len);
  23418. + if (!skb)
  23419. +- return -EINVAL;
  23420. ++ return ERR_PTR(-EINVAL);
  23421. +
  23422. + cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
  23423. + cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
  23424. +
  23425. + for (i = 0; i < arg->n_channels; i++) {
  23426. +- u32 flags = 0;
  23427. +-
  23428. + ch = &arg->channels[i];
  23429. + ci = &cmd->chan_info[i];
  23430. +
  23431. +- if (ch->passive)
  23432. +- flags |= WMI_CHAN_FLAG_PASSIVE;
  23433. +- if (ch->allow_ibss)
  23434. +- flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
  23435. +- if (ch->allow_ht)
  23436. +- flags |= WMI_CHAN_FLAG_ALLOW_HT;
  23437. +- if (ch->allow_vht)
  23438. +- flags |= WMI_CHAN_FLAG_ALLOW_VHT;
  23439. +- if (ch->ht40plus)
  23440. +- flags |= WMI_CHAN_FLAG_HT40_PLUS;
  23441. +- if (ch->chan_radar)
  23442. +- flags |= WMI_CHAN_FLAG_DFS;
  23443. +-
  23444. +- ci->mhz = __cpu_to_le32(ch->freq);
  23445. +- ci->band_center_freq1 = __cpu_to_le32(ch->freq);
  23446. +- ci->band_center_freq2 = 0;
  23447. +- ci->min_power = ch->min_power;
  23448. +- ci->max_power = ch->max_power;
  23449. +- ci->reg_power = ch->max_reg_power;
  23450. +- ci->antenna_max = ch->max_antenna_gain;
  23451. +-
  23452. +- /* mode & flags share storage */
  23453. +- ci->mode = ch->mode;
  23454. +- ci->flags |= __cpu_to_le32(flags);
  23455. ++ ath10k_wmi_put_wmi_channel(ci, ch);
  23456. + }
  23457. +
  23458. +- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
  23459. ++ return skb;
  23460. + }
  23461. +
  23462. +-int ath10k_wmi_peer_assoc(struct ath10k *ar,
  23463. +- const struct wmi_peer_assoc_complete_arg *arg)
  23464. ++static void
  23465. ++ath10k_wmi_peer_assoc_fill(struct ath10k *ar, void *buf,
  23466. ++ const struct wmi_peer_assoc_complete_arg *arg)
  23467. + {
  23468. +- struct wmi_peer_assoc_complete_cmd *cmd;
  23469. +- struct sk_buff *skb;
  23470. ++ struct wmi_common_peer_assoc_complete_cmd *cmd = buf;
  23471. +
  23472. +- if (arg->peer_mpdu_density > 16)
  23473. +- return -EINVAL;
  23474. +- if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
  23475. +- return -EINVAL;
  23476. +- if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
  23477. +- return -EINVAL;
  23478. +-
  23479. +- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  23480. +- if (!skb)
  23481. +- return -ENOMEM;
  23482. +-
  23483. +- cmd = (struct wmi_peer_assoc_complete_cmd *)skb->data;
  23484. + cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
  23485. + cmd->peer_new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
  23486. + cmd->peer_associd = __cpu_to_le32(arg->peer_aid);
  23487. +@@ -3469,7 +4746,7 @@ int ath10k_wmi_peer_assoc(struct ath10k
  23488. + cmd->peer_vht_caps = __cpu_to_le32(arg->peer_vht_caps);
  23489. + cmd->peer_phymode = __cpu_to_le32(arg->peer_phymode);
  23490. +
  23491. +- memcpy(cmd->peer_macaddr.addr, arg->addr, ETH_ALEN);
  23492. ++ ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
  23493. +
  23494. + cmd->peer_legacy_rates.num_rates =
  23495. + __cpu_to_le32(arg->peer_legacy_rates.num_rates);
  23496. +@@ -3489,57 +4766,183 @@ int ath10k_wmi_peer_assoc(struct ath10k
  23497. + __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
  23498. + cmd->peer_vht_rates.tx_mcs_set =
  23499. + __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
  23500. ++}
  23501. ++
  23502. ++static void
  23503. ++ath10k_wmi_peer_assoc_fill_main(struct ath10k *ar, void *buf,
  23504. ++ const struct wmi_peer_assoc_complete_arg *arg)
  23505. ++{
  23506. ++ struct wmi_main_peer_assoc_complete_cmd *cmd = buf;
  23507. +
  23508. +- ath10k_dbg(ATH10K_DBG_WMI,
  23509. ++ ath10k_wmi_peer_assoc_fill(ar, buf, arg);
  23510. ++ memset(cmd->peer_ht_info, 0, sizeof(cmd->peer_ht_info));
  23511. ++}
  23512. ++
  23513. ++static void
  23514. ++ath10k_wmi_peer_assoc_fill_10_1(struct ath10k *ar, void *buf,
  23515. ++ const struct wmi_peer_assoc_complete_arg *arg)
  23516. ++{
  23517. ++ ath10k_wmi_peer_assoc_fill(ar, buf, arg);
  23518. ++}
  23519. ++
  23520. ++static void
  23521. ++ath10k_wmi_peer_assoc_fill_10_2(struct ath10k *ar, void *buf,
  23522. ++ const struct wmi_peer_assoc_complete_arg *arg)
  23523. ++{
  23524. ++ struct wmi_10_2_peer_assoc_complete_cmd *cmd = buf;
  23525. ++ int max_mcs, max_nss;
  23526. ++ u32 info0;
  23527. ++
  23528. ++ /* TODO: Is using max values okay with firmware? */
  23529. ++ max_mcs = 0xf;
  23530. ++ max_nss = 0xf;
  23531. ++
  23532. ++ info0 = SM(max_mcs, WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX) |
  23533. ++ SM(max_nss, WMI_PEER_ASSOC_INFO0_MAX_NSS);
  23534. ++
  23535. ++ ath10k_wmi_peer_assoc_fill(ar, buf, arg);
  23536. ++ cmd->info0 = __cpu_to_le32(info0);
  23537. ++}
  23538. ++
  23539. ++static int
  23540. ++ath10k_wmi_peer_assoc_check_arg(const struct wmi_peer_assoc_complete_arg *arg)
  23541. ++{
  23542. ++ if (arg->peer_mpdu_density > 16)
  23543. ++ return -EINVAL;
  23544. ++ if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
  23545. ++ return -EINVAL;
  23546. ++ if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
  23547. ++ return -EINVAL;
  23548. ++
  23549. ++ return 0;
  23550. ++}
  23551. ++
  23552. ++static struct sk_buff *
  23553. ++ath10k_wmi_op_gen_peer_assoc(struct ath10k *ar,
  23554. ++ const struct wmi_peer_assoc_complete_arg *arg)
  23555. ++{
  23556. ++ size_t len = sizeof(struct wmi_main_peer_assoc_complete_cmd);
  23557. ++ struct sk_buff *skb;
  23558. ++ int ret;
  23559. ++
  23560. ++ ret = ath10k_wmi_peer_assoc_check_arg(arg);
  23561. ++ if (ret)
  23562. ++ return ERR_PTR(ret);
  23563. ++
  23564. ++ skb = ath10k_wmi_alloc_skb(ar, len);
  23565. ++ if (!skb)
  23566. ++ return ERR_PTR(-ENOMEM);
  23567. ++
  23568. ++ ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg);
  23569. ++
  23570. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  23571. ++ "wmi peer assoc vdev %d addr %pM (%s)\n",
  23572. ++ arg->vdev_id, arg->addr,
  23573. ++ arg->peer_reassoc ? "reassociate" : "new");
  23574. ++ return skb;
  23575. ++}
  23576. ++
  23577. ++static struct sk_buff *
  23578. ++ath10k_wmi_10_1_op_gen_peer_assoc(struct ath10k *ar,
  23579. ++ const struct wmi_peer_assoc_complete_arg *arg)
  23580. ++{
  23581. ++ size_t len = sizeof(struct wmi_10_1_peer_assoc_complete_cmd);
  23582. ++ struct sk_buff *skb;
  23583. ++ int ret;
  23584. ++
  23585. ++ ret = ath10k_wmi_peer_assoc_check_arg(arg);
  23586. ++ if (ret)
  23587. ++ return ERR_PTR(ret);
  23588. ++
  23589. ++ skb = ath10k_wmi_alloc_skb(ar, len);
  23590. ++ if (!skb)
  23591. ++ return ERR_PTR(-ENOMEM);
  23592. ++
  23593. ++ ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg);
  23594. ++
  23595. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  23596. ++ "wmi peer assoc vdev %d addr %pM (%s)\n",
  23597. ++ arg->vdev_id, arg->addr,
  23598. ++ arg->peer_reassoc ? "reassociate" : "new");
  23599. ++ return skb;
  23600. ++}
  23601. ++
  23602. ++static struct sk_buff *
  23603. ++ath10k_wmi_10_2_op_gen_peer_assoc(struct ath10k *ar,
  23604. ++ const struct wmi_peer_assoc_complete_arg *arg)
  23605. ++{
  23606. ++ size_t len = sizeof(struct wmi_10_2_peer_assoc_complete_cmd);
  23607. ++ struct sk_buff *skb;
  23608. ++ int ret;
  23609. ++
  23610. ++ ret = ath10k_wmi_peer_assoc_check_arg(arg);
  23611. ++ if (ret)
  23612. ++ return ERR_PTR(ret);
  23613. ++
  23614. ++ skb = ath10k_wmi_alloc_skb(ar, len);
  23615. ++ if (!skb)
  23616. ++ return ERR_PTR(-ENOMEM);
  23617. ++
  23618. ++ ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg);
  23619. ++
  23620. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  23621. + "wmi peer assoc vdev %d addr %pM (%s)\n",
  23622. + arg->vdev_id, arg->addr,
  23623. + arg->peer_reassoc ? "reassociate" : "new");
  23624. +- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
  23625. ++ return skb;
  23626. ++}
  23627. ++
  23628. ++static struct sk_buff *
  23629. ++ath10k_wmi_10_2_op_gen_pdev_get_temperature(struct ath10k *ar)
  23630. ++{
  23631. ++ struct sk_buff *skb;
  23632. ++
  23633. ++ skb = ath10k_wmi_alloc_skb(ar, 0);
  23634. ++ if (!skb)
  23635. ++ return ERR_PTR(-ENOMEM);
  23636. ++
  23637. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev get temperature\n");
  23638. ++ return skb;
  23639. + }
  23640. +
  23641. + /* This function assumes the beacon is already DMA mapped */
  23642. +-int ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif)
  23643. ++static struct sk_buff *
  23644. ++ath10k_wmi_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, const void *bcn,
  23645. ++ size_t bcn_len, u32 bcn_paddr, bool dtim_zero,
  23646. ++ bool deliver_cab)
  23647. + {
  23648. + struct wmi_bcn_tx_ref_cmd *cmd;
  23649. + struct sk_buff *skb;
  23650. +- struct sk_buff *beacon = arvif->beacon;
  23651. +- struct ath10k *ar = arvif->ar;
  23652. + struct ieee80211_hdr *hdr;
  23653. +- int ret;
  23654. + u16 fc;
  23655. +
  23656. +- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  23657. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  23658. + if (!skb)
  23659. +- return -ENOMEM;
  23660. ++ return ERR_PTR(-ENOMEM);
  23661. +
  23662. +- hdr = (struct ieee80211_hdr *)beacon->data;
  23663. ++ hdr = (struct ieee80211_hdr *)bcn;
  23664. + fc = le16_to_cpu(hdr->frame_control);
  23665. +
  23666. + cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data;
  23667. +- cmd->vdev_id = __cpu_to_le32(arvif->vdev_id);
  23668. +- cmd->data_len = __cpu_to_le32(beacon->len);
  23669. +- cmd->data_ptr = __cpu_to_le32(ATH10K_SKB_CB(beacon)->paddr);
  23670. ++ cmd->vdev_id = __cpu_to_le32(vdev_id);
  23671. ++ cmd->data_len = __cpu_to_le32(bcn_len);
  23672. ++ cmd->data_ptr = __cpu_to_le32(bcn_paddr);
  23673. + cmd->msdu_id = 0;
  23674. + cmd->frame_control = __cpu_to_le32(fc);
  23675. + cmd->flags = 0;
  23676. ++ cmd->antenna_mask = __cpu_to_le32(WMI_BCN_TX_REF_DEF_ANTENNA);
  23677. +
  23678. +- if (ATH10K_SKB_CB(beacon)->bcn.dtim_zero)
  23679. ++ if (dtim_zero)
  23680. + cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
  23681. +
  23682. +- if (ATH10K_SKB_CB(beacon)->bcn.deliver_cab)
  23683. ++ if (deliver_cab)
  23684. + cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
  23685. +
  23686. +- ret = ath10k_wmi_cmd_send_nowait(ar, skb,
  23687. +- ar->wmi.cmd->pdev_send_bcn_cmdid);
  23688. +-
  23689. +- if (ret)
  23690. +- dev_kfree_skb(skb);
  23691. +-
  23692. +- return ret;
  23693. ++ return skb;
  23694. + }
  23695. +
  23696. +-static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
  23697. +- const struct wmi_wmm_params_arg *arg)
  23698. ++void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params,
  23699. ++ const struct wmi_wmm_params_arg *arg)
  23700. + {
  23701. + params->cwmin = __cpu_to_le32(arg->cwmin);
  23702. + params->cwmax = __cpu_to_le32(arg->cwmax);
  23703. +@@ -3549,76 +4952,81 @@ static void ath10k_wmi_pdev_set_wmm_para
  23704. + params->no_ack = __cpu_to_le32(arg->no_ack);
  23705. + }
  23706. +
  23707. +-int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
  23708. +- const struct wmi_pdev_set_wmm_params_arg *arg)
  23709. ++static struct sk_buff *
  23710. ++ath10k_wmi_op_gen_pdev_set_wmm(struct ath10k *ar,
  23711. ++ const struct wmi_wmm_params_all_arg *arg)
  23712. + {
  23713. + struct wmi_pdev_set_wmm_params *cmd;
  23714. + struct sk_buff *skb;
  23715. +
  23716. +- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  23717. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  23718. + if (!skb)
  23719. +- return -ENOMEM;
  23720. ++ return ERR_PTR(-ENOMEM);
  23721. +
  23722. + cmd = (struct wmi_pdev_set_wmm_params *)skb->data;
  23723. +- ath10k_wmi_pdev_set_wmm_param(&cmd->ac_be, &arg->ac_be);
  23724. +- ath10k_wmi_pdev_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
  23725. +- ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
  23726. +- ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
  23727. ++ ath10k_wmi_set_wmm_param(&cmd->ac_be, &arg->ac_be);
  23728. ++ ath10k_wmi_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
  23729. ++ ath10k_wmi_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
  23730. ++ ath10k_wmi_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
  23731. +
  23732. +- ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
  23733. +- return ath10k_wmi_cmd_send(ar, skb,
  23734. +- ar->wmi.cmd->pdev_set_wmm_params_cmdid);
  23735. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
  23736. ++ return skb;
  23737. + }
  23738. +
  23739. +-int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
  23740. ++static struct sk_buff *
  23741. ++ath10k_wmi_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
  23742. + {
  23743. + struct wmi_request_stats_cmd *cmd;
  23744. + struct sk_buff *skb;
  23745. +
  23746. +- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  23747. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  23748. + if (!skb)
  23749. +- return -ENOMEM;
  23750. ++ return ERR_PTR(-ENOMEM);
  23751. +
  23752. + cmd = (struct wmi_request_stats_cmd *)skb->data;
  23753. +- cmd->stats_id = __cpu_to_le32(stats_id);
  23754. ++ cmd->stats_id = __cpu_to_le32(stats_mask);
  23755. +
  23756. +- ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
  23757. +- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
  23758. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats 0x%08x\n",
  23759. ++ stats_mask);
  23760. ++ return skb;
  23761. + }
  23762. +
  23763. +-int ath10k_wmi_force_fw_hang(struct ath10k *ar,
  23764. +- enum wmi_force_fw_hang_type type, u32 delay_ms)
  23765. ++static struct sk_buff *
  23766. ++ath10k_wmi_op_gen_force_fw_hang(struct ath10k *ar,
  23767. ++ enum wmi_force_fw_hang_type type, u32 delay_ms)
  23768. + {
  23769. + struct wmi_force_fw_hang_cmd *cmd;
  23770. + struct sk_buff *skb;
  23771. +
  23772. +- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  23773. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  23774. + if (!skb)
  23775. +- return -ENOMEM;
  23776. ++ return ERR_PTR(-ENOMEM);
  23777. +
  23778. + cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
  23779. + cmd->type = __cpu_to_le32(type);
  23780. + cmd->delay_ms = __cpu_to_le32(delay_ms);
  23781. +
  23782. +- ath10k_dbg(ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
  23783. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
  23784. + type, delay_ms);
  23785. +- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
  23786. ++ return skb;
  23787. + }
  23788. +
  23789. +-int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable)
  23790. ++static struct sk_buff *
  23791. ++ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
  23792. ++ u32 log_level)
  23793. + {
  23794. + struct wmi_dbglog_cfg_cmd *cmd;
  23795. + struct sk_buff *skb;
  23796. + u32 cfg;
  23797. +
  23798. +- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
  23799. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  23800. + if (!skb)
  23801. +- return -ENOMEM;
  23802. ++ return ERR_PTR(-ENOMEM);
  23803. +
  23804. + cmd = (struct wmi_dbglog_cfg_cmd *)skb->data;
  23805. +
  23806. + if (module_enable) {
  23807. +- cfg = SM(ATH10K_DBGLOG_LEVEL_VERBOSE,
  23808. ++ cfg = SM(log_level,
  23809. + ATH10K_DBGLOG_CFG_LOG_LVL);
  23810. + } else {
  23811. + /* set back defaults, all modules with WARN level */
  23812. +@@ -3632,12 +5040,474 @@ int ath10k_wmi_dbglog_cfg(struct ath10k
  23813. + cmd->config_enable = __cpu_to_le32(cfg);
  23814. + cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
  23815. +
  23816. +- ath10k_dbg(ATH10K_DBG_WMI,
  23817. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  23818. + "wmi dbglog cfg modules %08x %08x config %08x %08x\n",
  23819. + __le32_to_cpu(cmd->module_enable),
  23820. + __le32_to_cpu(cmd->module_valid),
  23821. + __le32_to_cpu(cmd->config_enable),
  23822. + __le32_to_cpu(cmd->config_valid));
  23823. ++ return skb;
  23824. ++}
  23825. ++
  23826. ++static struct sk_buff *
  23827. ++ath10k_wmi_op_gen_pktlog_enable(struct ath10k *ar, u32 ev_bitmap)
  23828. ++{
  23829. ++ struct wmi_pdev_pktlog_enable_cmd *cmd;
  23830. ++ struct sk_buff *skb;
  23831. ++
  23832. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  23833. ++ if (!skb)
  23834. ++ return ERR_PTR(-ENOMEM);
  23835. ++
  23836. ++ ev_bitmap &= ATH10K_PKTLOG_ANY;
  23837. ++
  23838. ++ cmd = (struct wmi_pdev_pktlog_enable_cmd *)skb->data;
  23839. ++ cmd->ev_bitmap = __cpu_to_le32(ev_bitmap);
  23840. ++
  23841. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi enable pktlog filter 0x%08x\n",
  23842. ++ ev_bitmap);
  23843. ++ return skb;
  23844. ++}
  23845. ++
  23846. ++static struct sk_buff *
  23847. ++ath10k_wmi_op_gen_pktlog_disable(struct ath10k *ar)
  23848. ++{
  23849. ++ struct sk_buff *skb;
  23850. +
  23851. +- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
  23852. ++ skb = ath10k_wmi_alloc_skb(ar, 0);
  23853. ++ if (!skb)
  23854. ++ return ERR_PTR(-ENOMEM);
  23855. ++
  23856. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi disable pktlog\n");
  23857. ++ return skb;
  23858. ++}
  23859. ++
  23860. ++static struct sk_buff *
  23861. ++ath10k_wmi_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period,
  23862. ++ u32 duration, u32 next_offset,
  23863. ++ u32 enabled)
  23864. ++{
  23865. ++ struct wmi_pdev_set_quiet_cmd *cmd;
  23866. ++ struct sk_buff *skb;
  23867. ++
  23868. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  23869. ++ if (!skb)
  23870. ++ return ERR_PTR(-ENOMEM);
  23871. ++
  23872. ++ cmd = (struct wmi_pdev_set_quiet_cmd *)skb->data;
  23873. ++ cmd->period = __cpu_to_le32(period);
  23874. ++ cmd->duration = __cpu_to_le32(duration);
  23875. ++ cmd->next_start = __cpu_to_le32(next_offset);
  23876. ++ cmd->enabled = __cpu_to_le32(enabled);
  23877. ++
  23878. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  23879. ++ "wmi quiet param: period %u duration %u enabled %d\n",
  23880. ++ period, duration, enabled);
  23881. ++ return skb;
  23882. ++}
  23883. ++
  23884. ++static struct sk_buff *
  23885. ++ath10k_wmi_op_gen_addba_clear_resp(struct ath10k *ar, u32 vdev_id,
  23886. ++ const u8 *mac)
  23887. ++{
  23888. ++ struct wmi_addba_clear_resp_cmd *cmd;
  23889. ++ struct sk_buff *skb;
  23890. ++
  23891. ++ if (!mac)
  23892. ++ return ERR_PTR(-EINVAL);
  23893. ++
  23894. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  23895. ++ if (!skb)
  23896. ++ return ERR_PTR(-ENOMEM);
  23897. ++
  23898. ++ cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
  23899. ++ cmd->vdev_id = __cpu_to_le32(vdev_id);
  23900. ++ ether_addr_copy(cmd->peer_macaddr.addr, mac);
  23901. ++
  23902. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  23903. ++ "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
  23904. ++ vdev_id, mac);
  23905. ++ return skb;
  23906. ++}
  23907. ++
  23908. ++static struct sk_buff *
  23909. ++ath10k_wmi_op_gen_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
  23910. ++ u32 tid, u32 buf_size)
  23911. ++{
  23912. ++ struct wmi_addba_send_cmd *cmd;
  23913. ++ struct sk_buff *skb;
  23914. ++
  23915. ++ if (!mac)
  23916. ++ return ERR_PTR(-EINVAL);
  23917. ++
  23918. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  23919. ++ if (!skb)
  23920. ++ return ERR_PTR(-ENOMEM);
  23921. ++
  23922. ++ cmd = (struct wmi_addba_send_cmd *)skb->data;
  23923. ++ cmd->vdev_id = __cpu_to_le32(vdev_id);
  23924. ++ ether_addr_copy(cmd->peer_macaddr.addr, mac);
  23925. ++ cmd->tid = __cpu_to_le32(tid);
  23926. ++ cmd->buffersize = __cpu_to_le32(buf_size);
  23927. ++
  23928. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  23929. ++ "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
  23930. ++ vdev_id, mac, tid, buf_size);
  23931. ++ return skb;
  23932. ++}
  23933. ++
  23934. ++static struct sk_buff *
  23935. ++ath10k_wmi_op_gen_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
  23936. ++ u32 tid, u32 status)
  23937. ++{
  23938. ++ struct wmi_addba_setresponse_cmd *cmd;
  23939. ++ struct sk_buff *skb;
  23940. ++
  23941. ++ if (!mac)
  23942. ++ return ERR_PTR(-EINVAL);
  23943. ++
  23944. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  23945. ++ if (!skb)
  23946. ++ return ERR_PTR(-ENOMEM);
  23947. ++
  23948. ++ cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
  23949. ++ cmd->vdev_id = __cpu_to_le32(vdev_id);
  23950. ++ ether_addr_copy(cmd->peer_macaddr.addr, mac);
  23951. ++ cmd->tid = __cpu_to_le32(tid);
  23952. ++ cmd->statuscode = __cpu_to_le32(status);
  23953. ++
  23954. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  23955. ++ "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
  23956. ++ vdev_id, mac, tid, status);
  23957. ++ return skb;
  23958. ++}
  23959. ++
  23960. ++static struct sk_buff *
  23961. ++ath10k_wmi_op_gen_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
  23962. ++ u32 tid, u32 initiator, u32 reason)
  23963. ++{
  23964. ++ struct wmi_delba_send_cmd *cmd;
  23965. ++ struct sk_buff *skb;
  23966. ++
  23967. ++ if (!mac)
  23968. ++ return ERR_PTR(-EINVAL);
  23969. ++
  23970. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  23971. ++ if (!skb)
  23972. ++ return ERR_PTR(-ENOMEM);
  23973. ++
  23974. ++ cmd = (struct wmi_delba_send_cmd *)skb->data;
  23975. ++ cmd->vdev_id = __cpu_to_le32(vdev_id);
  23976. ++ ether_addr_copy(cmd->peer_macaddr.addr, mac);
  23977. ++ cmd->tid = __cpu_to_le32(tid);
  23978. ++ cmd->initiator = __cpu_to_le32(initiator);
  23979. ++ cmd->reasoncode = __cpu_to_le32(reason);
  23980. ++
  23981. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  23982. ++ "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
  23983. ++ vdev_id, mac, tid, initiator, reason);
  23984. ++ return skb;
  23985. ++}
  23986. ++
  23987. ++static const struct wmi_ops wmi_ops = {
  23988. ++ .rx = ath10k_wmi_op_rx,
  23989. ++ .map_svc = wmi_main_svc_map,
  23990. ++
  23991. ++ .pull_scan = ath10k_wmi_op_pull_scan_ev,
  23992. ++ .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
  23993. ++ .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
  23994. ++ .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
  23995. ++ .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
  23996. ++ .pull_swba = ath10k_wmi_op_pull_swba_ev,
  23997. ++ .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
  23998. ++ .pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
  23999. ++ .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
  24000. ++ .pull_fw_stats = ath10k_wmi_main_op_pull_fw_stats,
  24001. ++
  24002. ++ .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
  24003. ++ .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
  24004. ++ .gen_pdev_set_rd = ath10k_wmi_op_gen_pdev_set_rd,
  24005. ++ .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
  24006. ++ .gen_init = ath10k_wmi_op_gen_init,
  24007. ++ .gen_start_scan = ath10k_wmi_op_gen_start_scan,
  24008. ++ .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
  24009. ++ .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
  24010. ++ .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
  24011. ++ .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
  24012. ++ .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
  24013. ++ .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
  24014. ++ .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
  24015. ++ .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
  24016. ++ .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
  24017. ++ .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
  24018. ++ .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
  24019. ++ /* .gen_vdev_wmm_conf not implemented */
  24020. ++ .gen_peer_create = ath10k_wmi_op_gen_peer_create,
  24021. ++ .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
  24022. ++ .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
  24023. ++ .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
  24024. ++ .gen_peer_assoc = ath10k_wmi_op_gen_peer_assoc,
  24025. ++ .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
  24026. ++ .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
  24027. ++ .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
  24028. ++ .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
  24029. ++ .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
  24030. ++ .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
  24031. ++ .gen_request_stats = ath10k_wmi_op_gen_request_stats,
  24032. ++ .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
  24033. ++ .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
  24034. ++ .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
  24035. ++ .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
  24036. ++ .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
  24037. ++ .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
  24038. ++ /* .gen_pdev_get_temperature not implemented */
  24039. ++ .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
  24040. ++ .gen_addba_send = ath10k_wmi_op_gen_addba_send,
  24041. ++ .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
  24042. ++ .gen_delba_send = ath10k_wmi_op_gen_delba_send,
  24043. ++ /* .gen_bcn_tmpl not implemented */
  24044. ++ /* .gen_prb_tmpl not implemented */
  24045. ++ /* .gen_p2p_go_bcn_ie not implemented */
  24046. ++};
  24047. ++
  24048. ++static const struct wmi_ops wmi_10_1_ops = {
  24049. ++ .rx = ath10k_wmi_10_1_op_rx,
  24050. ++ .map_svc = wmi_10x_svc_map,
  24051. ++ .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
  24052. ++ .pull_fw_stats = ath10k_wmi_10x_op_pull_fw_stats,
  24053. ++ .gen_init = ath10k_wmi_10_1_op_gen_init,
  24054. ++ .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
  24055. ++ .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
  24056. ++ .gen_peer_assoc = ath10k_wmi_10_1_op_gen_peer_assoc,
  24057. ++ /* .gen_pdev_get_temperature not implemented */
  24058. ++
  24059. ++ /* shared with main branch */
  24060. ++ .pull_scan = ath10k_wmi_op_pull_scan_ev,
  24061. ++ .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
  24062. ++ .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
  24063. ++ .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
  24064. ++ .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
  24065. ++ .pull_swba = ath10k_wmi_op_pull_swba_ev,
  24066. ++ .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
  24067. ++ .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
  24068. ++
  24069. ++ .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
  24070. ++ .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
  24071. ++ .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
  24072. ++ .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
  24073. ++ .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
  24074. ++ .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
  24075. ++ .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
  24076. ++ .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
  24077. ++ .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
  24078. ++ .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
  24079. ++ .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
  24080. ++ .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
  24081. ++ .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
  24082. ++ .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
  24083. ++ /* .gen_vdev_wmm_conf not implemented */
  24084. ++ .gen_peer_create = ath10k_wmi_op_gen_peer_create,
  24085. ++ .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
  24086. ++ .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
  24087. ++ .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
  24088. ++ .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
  24089. ++ .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
  24090. ++ .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
  24091. ++ .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
  24092. ++ .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
  24093. ++ .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
  24094. ++ .gen_request_stats = ath10k_wmi_op_gen_request_stats,
  24095. ++ .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
  24096. ++ .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
  24097. ++ .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
  24098. ++ .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
  24099. ++ .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
  24100. ++ .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
  24101. ++ .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
  24102. ++ .gen_addba_send = ath10k_wmi_op_gen_addba_send,
  24103. ++ .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
  24104. ++ .gen_delba_send = ath10k_wmi_op_gen_delba_send,
  24105. ++ /* .gen_bcn_tmpl not implemented */
  24106. ++ /* .gen_prb_tmpl not implemented */
  24107. ++ /* .gen_p2p_go_bcn_ie not implemented */
  24108. ++};
  24109. ++
  24110. ++static const struct wmi_ops wmi_10_2_ops = {
  24111. ++ .rx = ath10k_wmi_10_2_op_rx,
  24112. ++ .pull_fw_stats = ath10k_wmi_10_2_op_pull_fw_stats,
  24113. ++ .gen_init = ath10k_wmi_10_2_op_gen_init,
  24114. ++ .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
  24115. ++ /* .gen_pdev_get_temperature not implemented */
  24116. ++
  24117. ++ /* shared with 10.1 */
  24118. ++ .map_svc = wmi_10x_svc_map,
  24119. ++ .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
  24120. ++ .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
  24121. ++ .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
  24122. ++
  24123. ++ .pull_scan = ath10k_wmi_op_pull_scan_ev,
  24124. ++ .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
  24125. ++ .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
  24126. ++ .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
  24127. ++ .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
  24128. ++ .pull_swba = ath10k_wmi_op_pull_swba_ev,
  24129. ++ .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
  24130. ++ .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
  24131. ++
  24132. ++ .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
  24133. ++ .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
  24134. ++ .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
  24135. ++ .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
  24136. ++ .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
  24137. ++ .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
  24138. ++ .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
  24139. ++ .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
  24140. ++ .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
  24141. ++ .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
  24142. ++ .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
  24143. ++ .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
  24144. ++ .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
  24145. ++ .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
  24146. ++ /* .gen_vdev_wmm_conf not implemented */
  24147. ++ .gen_peer_create = ath10k_wmi_op_gen_peer_create,
  24148. ++ .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
  24149. ++ .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
  24150. ++ .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
  24151. ++ .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
  24152. ++ .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
  24153. ++ .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
  24154. ++ .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
  24155. ++ .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
  24156. ++ .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
  24157. ++ .gen_request_stats = ath10k_wmi_op_gen_request_stats,
  24158. ++ .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
  24159. ++ .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
  24160. ++ .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
  24161. ++ .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
  24162. ++ .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
  24163. ++ .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
  24164. ++ .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
  24165. ++ .gen_addba_send = ath10k_wmi_op_gen_addba_send,
  24166. ++ .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
  24167. ++ .gen_delba_send = ath10k_wmi_op_gen_delba_send,
  24168. ++};
  24169. ++
  24170. ++static const struct wmi_ops wmi_10_2_4_ops = {
  24171. ++ .rx = ath10k_wmi_10_2_op_rx,
  24172. ++ .pull_fw_stats = ath10k_wmi_10_2_4_op_pull_fw_stats,
  24173. ++ .gen_init = ath10k_wmi_10_2_op_gen_init,
  24174. ++ .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
  24175. ++ .gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
  24176. ++
  24177. ++ /* shared with 10.1 */
  24178. ++ .map_svc = wmi_10x_svc_map,
  24179. ++ .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
  24180. ++ .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
  24181. ++ .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
  24182. ++
  24183. ++ .pull_scan = ath10k_wmi_op_pull_scan_ev,
  24184. ++ .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
  24185. ++ .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
  24186. ++ .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
  24187. ++ .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
  24188. ++ .pull_swba = ath10k_wmi_op_pull_swba_ev,
  24189. ++ .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
  24190. ++ .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
  24191. ++
  24192. ++ .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
  24193. ++ .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
  24194. ++ .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
  24195. ++ .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
  24196. ++ .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
  24197. ++ .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
  24198. ++ .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
  24199. ++ .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
  24200. ++ .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
  24201. ++ .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
  24202. ++ .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
  24203. ++ .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
  24204. ++ .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
  24205. ++ .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
  24206. ++ .gen_peer_create = ath10k_wmi_op_gen_peer_create,
  24207. ++ .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
  24208. ++ .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
  24209. ++ .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
  24210. ++ .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
  24211. ++ .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
  24212. ++ .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
  24213. ++ .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
  24214. ++ .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
  24215. ++ .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
  24216. ++ .gen_request_stats = ath10k_wmi_op_gen_request_stats,
  24217. ++ .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
  24218. ++ .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
  24219. ++ .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
  24220. ++ .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
  24221. ++ .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
  24222. ++ .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
  24223. ++ .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
  24224. ++ .gen_addba_send = ath10k_wmi_op_gen_addba_send,
  24225. ++ .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
  24226. ++ .gen_delba_send = ath10k_wmi_op_gen_delba_send,
  24227. ++ /* .gen_bcn_tmpl not implemented */
  24228. ++ /* .gen_prb_tmpl not implemented */
  24229. ++ /* .gen_p2p_go_bcn_ie not implemented */
  24230. ++};
  24231. ++
  24232. ++int ath10k_wmi_attach(struct ath10k *ar)
  24233. ++{
  24234. ++ switch (ar->wmi.op_version) {
  24235. ++ case ATH10K_FW_WMI_OP_VERSION_10_2_4:
  24236. ++ ar->wmi.cmd = &wmi_10_2_4_cmd_map;
  24237. ++ ar->wmi.ops = &wmi_10_2_4_ops;
  24238. ++ ar->wmi.vdev_param = &wmi_10_2_4_vdev_param_map;
  24239. ++ ar->wmi.pdev_param = &wmi_10_2_4_pdev_param_map;
  24240. ++ break;
  24241. ++ case ATH10K_FW_WMI_OP_VERSION_10_2:
  24242. ++ ar->wmi.cmd = &wmi_10_2_cmd_map;
  24243. ++ ar->wmi.ops = &wmi_10_2_ops;
  24244. ++ ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
  24245. ++ ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
  24246. ++ break;
  24247. ++ case ATH10K_FW_WMI_OP_VERSION_10_1:
  24248. ++ ar->wmi.cmd = &wmi_10x_cmd_map;
  24249. ++ ar->wmi.ops = &wmi_10_1_ops;
  24250. ++ ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
  24251. ++ ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
  24252. ++ break;
  24253. ++ case ATH10K_FW_WMI_OP_VERSION_MAIN:
  24254. ++ ar->wmi.cmd = &wmi_cmd_map;
  24255. ++ ar->wmi.ops = &wmi_ops;
  24256. ++ ar->wmi.vdev_param = &wmi_vdev_param_map;
  24257. ++ ar->wmi.pdev_param = &wmi_pdev_param_map;
  24258. ++ break;
  24259. ++ case ATH10K_FW_WMI_OP_VERSION_TLV:
  24260. ++ ath10k_wmi_tlv_attach(ar);
  24261. ++ break;
  24262. ++ case ATH10K_FW_WMI_OP_VERSION_UNSET:
  24263. ++ case ATH10K_FW_WMI_OP_VERSION_MAX:
  24264. ++ ath10k_err(ar, "unsupported WMI op version: %d\n",
  24265. ++ ar->wmi.op_version);
  24266. ++ return -EINVAL;
  24267. ++ }
  24268. ++
  24269. ++ init_completion(&ar->wmi.service_ready);
  24270. ++ init_completion(&ar->wmi.unified_ready);
  24271. ++
  24272. ++ return 0;
  24273. ++}
  24274. ++
  24275. ++void ath10k_wmi_detach(struct ath10k *ar)
  24276. ++{
  24277. ++ int i;
  24278. ++
  24279. ++ /* free the host memory chunks requested by firmware */
  24280. ++ for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
  24281. ++ dma_free_coherent(ar->dev,
  24282. ++ ar->wmi.mem_chunks[i].len,
  24283. ++ ar->wmi.mem_chunks[i].vaddr,
  24284. ++ ar->wmi.mem_chunks[i].paddr);
  24285. ++ }
  24286. ++
  24287. ++ ar->wmi.num_mem_chunks = 0;
  24288. + }
  24289. +--- a/drivers/net/wireless/ath/ath10k/wmi.h
  24290. ++++ b/drivers/net/wireless/ath/ath10k/wmi.h
  24291. +@@ -73,119 +73,361 @@ struct wmi_cmd_hdr {
  24292. + #define HTC_PROTOCOL_VERSION 0x0002
  24293. + #define WMI_PROTOCOL_VERSION 0x0002
  24294. +
  24295. +-enum wmi_service_id {
  24296. +- WMI_SERVICE_BEACON_OFFLOAD = 0, /* beacon offload */
  24297. +- WMI_SERVICE_SCAN_OFFLOAD, /* scan offload */
  24298. +- WMI_SERVICE_ROAM_OFFLOAD, /* roam offload */
  24299. +- WMI_SERVICE_BCN_MISS_OFFLOAD, /* beacon miss offload */
  24300. +- WMI_SERVICE_STA_PWRSAVE, /* fake sleep + basic power save */
  24301. +- WMI_SERVICE_STA_ADVANCED_PWRSAVE, /* uapsd, pspoll, force sleep */
  24302. +- WMI_SERVICE_AP_UAPSD, /* uapsd on AP */
  24303. +- WMI_SERVICE_AP_DFS, /* DFS on AP */
  24304. +- WMI_SERVICE_11AC, /* supports 11ac */
  24305. +- WMI_SERVICE_BLOCKACK, /* Supports triggering ADDBA/DELBA from host*/
  24306. +- WMI_SERVICE_PHYERR, /* PHY error */
  24307. +- WMI_SERVICE_BCN_FILTER, /* Beacon filter support */
  24308. +- WMI_SERVICE_RTT, /* RTT (round trip time) support */
  24309. +- WMI_SERVICE_RATECTRL, /* Rate-control */
  24310. +- WMI_SERVICE_WOW, /* WOW Support */
  24311. +- WMI_SERVICE_RATECTRL_CACHE, /* Rate-control caching */
  24312. +- WMI_SERVICE_IRAM_TIDS, /* TIDs in IRAM */
  24313. +- WMI_SERVICE_ARPNS_OFFLOAD, /* ARP NS Offload support */
  24314. +- WMI_SERVICE_NLO, /* Network list offload service */
  24315. +- WMI_SERVICE_GTK_OFFLOAD, /* GTK offload */
  24316. +- WMI_SERVICE_SCAN_SCH, /* Scan Scheduler Service */
  24317. +- WMI_SERVICE_CSA_OFFLOAD, /* CSA offload service */
  24318. +- WMI_SERVICE_CHATTER, /* Chatter service */
  24319. +- WMI_SERVICE_COEX_FREQAVOID, /* FW report freq range to avoid */
  24320. +- WMI_SERVICE_PACKET_POWER_SAVE, /* packet power save service */
  24321. +- WMI_SERVICE_FORCE_FW_HANG, /* To test fw recovery mechanism */
  24322. +- WMI_SERVICE_GPIO, /* GPIO service */
  24323. +- WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, /* Modulated DTIM support */
  24324. +- WMI_STA_UAPSD_BASIC_AUTO_TRIG, /* UAPSD AC Trigger Generation */
  24325. +- WMI_STA_UAPSD_VAR_AUTO_TRIG, /* -do- */
  24326. +- WMI_SERVICE_STA_KEEP_ALIVE, /* STA keep alive mechanism support */
  24327. +- WMI_SERVICE_TX_ENCAP, /* Packet type for TX encapsulation */
  24328. +-
  24329. +- WMI_SERVICE_LAST,
  24330. +- WMI_MAX_SERVICE = 64 /* max service */
  24331. ++enum wmi_service {
  24332. ++ WMI_SERVICE_BEACON_OFFLOAD = 0,
  24333. ++ WMI_SERVICE_SCAN_OFFLOAD,
  24334. ++ WMI_SERVICE_ROAM_OFFLOAD,
  24335. ++ WMI_SERVICE_BCN_MISS_OFFLOAD,
  24336. ++ WMI_SERVICE_STA_PWRSAVE,
  24337. ++ WMI_SERVICE_STA_ADVANCED_PWRSAVE,
  24338. ++ WMI_SERVICE_AP_UAPSD,
  24339. ++ WMI_SERVICE_AP_DFS,
  24340. ++ WMI_SERVICE_11AC,
  24341. ++ WMI_SERVICE_BLOCKACK,
  24342. ++ WMI_SERVICE_PHYERR,
  24343. ++ WMI_SERVICE_BCN_FILTER,
  24344. ++ WMI_SERVICE_RTT,
  24345. ++ WMI_SERVICE_RATECTRL,
  24346. ++ WMI_SERVICE_WOW,
  24347. ++ WMI_SERVICE_RATECTRL_CACHE,
  24348. ++ WMI_SERVICE_IRAM_TIDS,
  24349. ++ WMI_SERVICE_ARPNS_OFFLOAD,
  24350. ++ WMI_SERVICE_NLO,
  24351. ++ WMI_SERVICE_GTK_OFFLOAD,
  24352. ++ WMI_SERVICE_SCAN_SCH,
  24353. ++ WMI_SERVICE_CSA_OFFLOAD,
  24354. ++ WMI_SERVICE_CHATTER,
  24355. ++ WMI_SERVICE_COEX_FREQAVOID,
  24356. ++ WMI_SERVICE_PACKET_POWER_SAVE,
  24357. ++ WMI_SERVICE_FORCE_FW_HANG,
  24358. ++ WMI_SERVICE_GPIO,
  24359. ++ WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
  24360. ++ WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
  24361. ++ WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
  24362. ++ WMI_SERVICE_STA_KEEP_ALIVE,
  24363. ++ WMI_SERVICE_TX_ENCAP,
  24364. ++ WMI_SERVICE_BURST,
  24365. ++ WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT,
  24366. ++ WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT,
  24367. ++ WMI_SERVICE_ROAM_SCAN_OFFLOAD,
  24368. ++ WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
  24369. ++ WMI_SERVICE_EARLY_RX,
  24370. ++ WMI_SERVICE_STA_SMPS,
  24371. ++ WMI_SERVICE_FWTEST,
  24372. ++ WMI_SERVICE_STA_WMMAC,
  24373. ++ WMI_SERVICE_TDLS,
  24374. ++ WMI_SERVICE_MCC_BCN_INTERVAL_CHANGE,
  24375. ++ WMI_SERVICE_ADAPTIVE_OCS,
  24376. ++ WMI_SERVICE_BA_SSN_SUPPORT,
  24377. ++ WMI_SERVICE_FILTER_IPSEC_NATKEEPALIVE,
  24378. ++ WMI_SERVICE_WLAN_HB,
  24379. ++ WMI_SERVICE_LTE_ANT_SHARE_SUPPORT,
  24380. ++ WMI_SERVICE_BATCH_SCAN,
  24381. ++ WMI_SERVICE_QPOWER,
  24382. ++ WMI_SERVICE_PLMREQ,
  24383. ++ WMI_SERVICE_THERMAL_MGMT,
  24384. ++ WMI_SERVICE_RMC,
  24385. ++ WMI_SERVICE_MHF_OFFLOAD,
  24386. ++ WMI_SERVICE_COEX_SAR,
  24387. ++ WMI_SERVICE_BCN_TXRATE_OVERRIDE,
  24388. ++ WMI_SERVICE_NAN,
  24389. ++ WMI_SERVICE_L1SS_STAT,
  24390. ++ WMI_SERVICE_ESTIMATE_LINKSPEED,
  24391. ++ WMI_SERVICE_OBSS_SCAN,
  24392. ++ WMI_SERVICE_TDLS_OFFCHAN,
  24393. ++ WMI_SERVICE_TDLS_UAPSD_BUFFER_STA,
  24394. ++ WMI_SERVICE_TDLS_UAPSD_SLEEP_STA,
  24395. ++ WMI_SERVICE_IBSS_PWRSAVE,
  24396. ++ WMI_SERVICE_LPASS,
  24397. ++ WMI_SERVICE_EXTSCAN,
  24398. ++ WMI_SERVICE_D0WOW,
  24399. ++ WMI_SERVICE_HSOFFLOAD,
  24400. ++ WMI_SERVICE_ROAM_HO_OFFLOAD,
  24401. ++ WMI_SERVICE_RX_FULL_REORDER,
  24402. ++ WMI_SERVICE_DHCP_OFFLOAD,
  24403. ++ WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
  24404. ++ WMI_SERVICE_MDNS_OFFLOAD,
  24405. ++ WMI_SERVICE_SAP_AUTH_OFFLOAD,
  24406. ++
  24407. ++ /* keep last */
  24408. ++ WMI_SERVICE_MAX,
  24409. ++};
  24410. ++
  24411. ++enum wmi_10x_service {
  24412. ++ WMI_10X_SERVICE_BEACON_OFFLOAD = 0,
  24413. ++ WMI_10X_SERVICE_SCAN_OFFLOAD,
  24414. ++ WMI_10X_SERVICE_ROAM_OFFLOAD,
  24415. ++ WMI_10X_SERVICE_BCN_MISS_OFFLOAD,
  24416. ++ WMI_10X_SERVICE_STA_PWRSAVE,
  24417. ++ WMI_10X_SERVICE_STA_ADVANCED_PWRSAVE,
  24418. ++ WMI_10X_SERVICE_AP_UAPSD,
  24419. ++ WMI_10X_SERVICE_AP_DFS,
  24420. ++ WMI_10X_SERVICE_11AC,
  24421. ++ WMI_10X_SERVICE_BLOCKACK,
  24422. ++ WMI_10X_SERVICE_PHYERR,
  24423. ++ WMI_10X_SERVICE_BCN_FILTER,
  24424. ++ WMI_10X_SERVICE_RTT,
  24425. ++ WMI_10X_SERVICE_RATECTRL,
  24426. ++ WMI_10X_SERVICE_WOW,
  24427. ++ WMI_10X_SERVICE_RATECTRL_CACHE,
  24428. ++ WMI_10X_SERVICE_IRAM_TIDS,
  24429. ++ WMI_10X_SERVICE_BURST,
  24430. ++
  24431. ++ /* introduced in 10.2 */
  24432. ++ WMI_10X_SERVICE_SMART_ANTENNA_SW_SUPPORT,
  24433. ++ WMI_10X_SERVICE_FORCE_FW_HANG,
  24434. ++ WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT,
  24435. ++};
  24436. ++
  24437. ++enum wmi_main_service {
  24438. ++ WMI_MAIN_SERVICE_BEACON_OFFLOAD = 0,
  24439. ++ WMI_MAIN_SERVICE_SCAN_OFFLOAD,
  24440. ++ WMI_MAIN_SERVICE_ROAM_OFFLOAD,
  24441. ++ WMI_MAIN_SERVICE_BCN_MISS_OFFLOAD,
  24442. ++ WMI_MAIN_SERVICE_STA_PWRSAVE,
  24443. ++ WMI_MAIN_SERVICE_STA_ADVANCED_PWRSAVE,
  24444. ++ WMI_MAIN_SERVICE_AP_UAPSD,
  24445. ++ WMI_MAIN_SERVICE_AP_DFS,
  24446. ++ WMI_MAIN_SERVICE_11AC,
  24447. ++ WMI_MAIN_SERVICE_BLOCKACK,
  24448. ++ WMI_MAIN_SERVICE_PHYERR,
  24449. ++ WMI_MAIN_SERVICE_BCN_FILTER,
  24450. ++ WMI_MAIN_SERVICE_RTT,
  24451. ++ WMI_MAIN_SERVICE_RATECTRL,
  24452. ++ WMI_MAIN_SERVICE_WOW,
  24453. ++ WMI_MAIN_SERVICE_RATECTRL_CACHE,
  24454. ++ WMI_MAIN_SERVICE_IRAM_TIDS,
  24455. ++ WMI_MAIN_SERVICE_ARPNS_OFFLOAD,
  24456. ++ WMI_MAIN_SERVICE_NLO,
  24457. ++ WMI_MAIN_SERVICE_GTK_OFFLOAD,
  24458. ++ WMI_MAIN_SERVICE_SCAN_SCH,
  24459. ++ WMI_MAIN_SERVICE_CSA_OFFLOAD,
  24460. ++ WMI_MAIN_SERVICE_CHATTER,
  24461. ++ WMI_MAIN_SERVICE_COEX_FREQAVOID,
  24462. ++ WMI_MAIN_SERVICE_PACKET_POWER_SAVE,
  24463. ++ WMI_MAIN_SERVICE_FORCE_FW_HANG,
  24464. ++ WMI_MAIN_SERVICE_GPIO,
  24465. ++ WMI_MAIN_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
  24466. ++ WMI_MAIN_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
  24467. ++ WMI_MAIN_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
  24468. ++ WMI_MAIN_SERVICE_STA_KEEP_ALIVE,
  24469. ++ WMI_MAIN_SERVICE_TX_ENCAP,
  24470. + };
  24471. +
  24472. + static inline char *wmi_service_name(int service_id)
  24473. + {
  24474. ++#define SVCSTR(x) case x: return #x
  24475. ++
  24476. + switch (service_id) {
  24477. +- case WMI_SERVICE_BEACON_OFFLOAD:
  24478. +- return "BEACON_OFFLOAD";
  24479. +- case WMI_SERVICE_SCAN_OFFLOAD:
  24480. +- return "SCAN_OFFLOAD";
  24481. +- case WMI_SERVICE_ROAM_OFFLOAD:
  24482. +- return "ROAM_OFFLOAD";
  24483. +- case WMI_SERVICE_BCN_MISS_OFFLOAD:
  24484. +- return "BCN_MISS_OFFLOAD";
  24485. +- case WMI_SERVICE_STA_PWRSAVE:
  24486. +- return "STA_PWRSAVE";
  24487. +- case WMI_SERVICE_STA_ADVANCED_PWRSAVE:
  24488. +- return "STA_ADVANCED_PWRSAVE";
  24489. +- case WMI_SERVICE_AP_UAPSD:
  24490. +- return "AP_UAPSD";
  24491. +- case WMI_SERVICE_AP_DFS:
  24492. +- return "AP_DFS";
  24493. +- case WMI_SERVICE_11AC:
  24494. +- return "11AC";
  24495. +- case WMI_SERVICE_BLOCKACK:
  24496. +- return "BLOCKACK";
  24497. +- case WMI_SERVICE_PHYERR:
  24498. +- return "PHYERR";
  24499. +- case WMI_SERVICE_BCN_FILTER:
  24500. +- return "BCN_FILTER";
  24501. +- case WMI_SERVICE_RTT:
  24502. +- return "RTT";
  24503. +- case WMI_SERVICE_RATECTRL:
  24504. +- return "RATECTRL";
  24505. +- case WMI_SERVICE_WOW:
  24506. +- return "WOW";
  24507. +- case WMI_SERVICE_RATECTRL_CACHE:
  24508. +- return "RATECTRL CACHE";
  24509. +- case WMI_SERVICE_IRAM_TIDS:
  24510. +- return "IRAM TIDS";
  24511. +- case WMI_SERVICE_ARPNS_OFFLOAD:
  24512. +- return "ARPNS_OFFLOAD";
  24513. +- case WMI_SERVICE_NLO:
  24514. +- return "NLO";
  24515. +- case WMI_SERVICE_GTK_OFFLOAD:
  24516. +- return "GTK_OFFLOAD";
  24517. +- case WMI_SERVICE_SCAN_SCH:
  24518. +- return "SCAN_SCH";
  24519. +- case WMI_SERVICE_CSA_OFFLOAD:
  24520. +- return "CSA_OFFLOAD";
  24521. +- case WMI_SERVICE_CHATTER:
  24522. +- return "CHATTER";
  24523. +- case WMI_SERVICE_COEX_FREQAVOID:
  24524. +- return "COEX_FREQAVOID";
  24525. +- case WMI_SERVICE_PACKET_POWER_SAVE:
  24526. +- return "PACKET_POWER_SAVE";
  24527. +- case WMI_SERVICE_FORCE_FW_HANG:
  24528. +- return "FORCE FW HANG";
  24529. +- case WMI_SERVICE_GPIO:
  24530. +- return "GPIO";
  24531. +- case WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM:
  24532. +- return "MODULATED DTIM";
  24533. +- case WMI_STA_UAPSD_BASIC_AUTO_TRIG:
  24534. +- return "BASIC UAPSD";
  24535. +- case WMI_STA_UAPSD_VAR_AUTO_TRIG:
  24536. +- return "VAR UAPSD";
  24537. +- case WMI_SERVICE_STA_KEEP_ALIVE:
  24538. +- return "STA KEEP ALIVE";
  24539. +- case WMI_SERVICE_TX_ENCAP:
  24540. +- return "TX ENCAP";
  24541. ++ SVCSTR(WMI_SERVICE_BEACON_OFFLOAD);
  24542. ++ SVCSTR(WMI_SERVICE_SCAN_OFFLOAD);
  24543. ++ SVCSTR(WMI_SERVICE_ROAM_OFFLOAD);
  24544. ++ SVCSTR(WMI_SERVICE_BCN_MISS_OFFLOAD);
  24545. ++ SVCSTR(WMI_SERVICE_STA_PWRSAVE);
  24546. ++ SVCSTR(WMI_SERVICE_STA_ADVANCED_PWRSAVE);
  24547. ++ SVCSTR(WMI_SERVICE_AP_UAPSD);
  24548. ++ SVCSTR(WMI_SERVICE_AP_DFS);
  24549. ++ SVCSTR(WMI_SERVICE_11AC);
  24550. ++ SVCSTR(WMI_SERVICE_BLOCKACK);
  24551. ++ SVCSTR(WMI_SERVICE_PHYERR);
  24552. ++ SVCSTR(WMI_SERVICE_BCN_FILTER);
  24553. ++ SVCSTR(WMI_SERVICE_RTT);
  24554. ++ SVCSTR(WMI_SERVICE_RATECTRL);
  24555. ++ SVCSTR(WMI_SERVICE_WOW);
  24556. ++ SVCSTR(WMI_SERVICE_RATECTRL_CACHE);
  24557. ++ SVCSTR(WMI_SERVICE_IRAM_TIDS);
  24558. ++ SVCSTR(WMI_SERVICE_ARPNS_OFFLOAD);
  24559. ++ SVCSTR(WMI_SERVICE_NLO);
  24560. ++ SVCSTR(WMI_SERVICE_GTK_OFFLOAD);
  24561. ++ SVCSTR(WMI_SERVICE_SCAN_SCH);
  24562. ++ SVCSTR(WMI_SERVICE_CSA_OFFLOAD);
  24563. ++ SVCSTR(WMI_SERVICE_CHATTER);
  24564. ++ SVCSTR(WMI_SERVICE_COEX_FREQAVOID);
  24565. ++ SVCSTR(WMI_SERVICE_PACKET_POWER_SAVE);
  24566. ++ SVCSTR(WMI_SERVICE_FORCE_FW_HANG);
  24567. ++ SVCSTR(WMI_SERVICE_GPIO);
  24568. ++ SVCSTR(WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM);
  24569. ++ SVCSTR(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG);
  24570. ++ SVCSTR(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG);
  24571. ++ SVCSTR(WMI_SERVICE_STA_KEEP_ALIVE);
  24572. ++ SVCSTR(WMI_SERVICE_TX_ENCAP);
  24573. ++ SVCSTR(WMI_SERVICE_BURST);
  24574. ++ SVCSTR(WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT);
  24575. ++ SVCSTR(WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT);
  24576. ++ SVCSTR(WMI_SERVICE_ROAM_SCAN_OFFLOAD);
  24577. ++ SVCSTR(WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC);
  24578. ++ SVCSTR(WMI_SERVICE_EARLY_RX);
  24579. ++ SVCSTR(WMI_SERVICE_STA_SMPS);
  24580. ++ SVCSTR(WMI_SERVICE_FWTEST);
  24581. ++ SVCSTR(WMI_SERVICE_STA_WMMAC);
  24582. ++ SVCSTR(WMI_SERVICE_TDLS);
  24583. ++ SVCSTR(WMI_SERVICE_MCC_BCN_INTERVAL_CHANGE);
  24584. ++ SVCSTR(WMI_SERVICE_ADAPTIVE_OCS);
  24585. ++ SVCSTR(WMI_SERVICE_BA_SSN_SUPPORT);
  24586. ++ SVCSTR(WMI_SERVICE_FILTER_IPSEC_NATKEEPALIVE);
  24587. ++ SVCSTR(WMI_SERVICE_WLAN_HB);
  24588. ++ SVCSTR(WMI_SERVICE_LTE_ANT_SHARE_SUPPORT);
  24589. ++ SVCSTR(WMI_SERVICE_BATCH_SCAN);
  24590. ++ SVCSTR(WMI_SERVICE_QPOWER);
  24591. ++ SVCSTR(WMI_SERVICE_PLMREQ);
  24592. ++ SVCSTR(WMI_SERVICE_THERMAL_MGMT);
  24593. ++ SVCSTR(WMI_SERVICE_RMC);
  24594. ++ SVCSTR(WMI_SERVICE_MHF_OFFLOAD);
  24595. ++ SVCSTR(WMI_SERVICE_COEX_SAR);
  24596. ++ SVCSTR(WMI_SERVICE_BCN_TXRATE_OVERRIDE);
  24597. ++ SVCSTR(WMI_SERVICE_NAN);
  24598. ++ SVCSTR(WMI_SERVICE_L1SS_STAT);
  24599. ++ SVCSTR(WMI_SERVICE_ESTIMATE_LINKSPEED);
  24600. ++ SVCSTR(WMI_SERVICE_OBSS_SCAN);
  24601. ++ SVCSTR(WMI_SERVICE_TDLS_OFFCHAN);
  24602. ++ SVCSTR(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA);
  24603. ++ SVCSTR(WMI_SERVICE_TDLS_UAPSD_SLEEP_STA);
  24604. ++ SVCSTR(WMI_SERVICE_IBSS_PWRSAVE);
  24605. ++ SVCSTR(WMI_SERVICE_LPASS);
  24606. ++ SVCSTR(WMI_SERVICE_EXTSCAN);
  24607. ++ SVCSTR(WMI_SERVICE_D0WOW);
  24608. ++ SVCSTR(WMI_SERVICE_HSOFFLOAD);
  24609. ++ SVCSTR(WMI_SERVICE_ROAM_HO_OFFLOAD);
  24610. ++ SVCSTR(WMI_SERVICE_RX_FULL_REORDER);
  24611. ++ SVCSTR(WMI_SERVICE_DHCP_OFFLOAD);
  24612. ++ SVCSTR(WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT);
  24613. ++ SVCSTR(WMI_SERVICE_MDNS_OFFLOAD);
  24614. ++ SVCSTR(WMI_SERVICE_SAP_AUTH_OFFLOAD);
  24615. + default:
  24616. +- return "UNKNOWN SERVICE\n";
  24617. ++ return NULL;
  24618. + }
  24619. ++
  24620. ++#undef SVCSTR
  24621. + }
  24622. +
  24623. ++#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
  24624. ++ ((svc_id) < (len) && \
  24625. ++ __le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \
  24626. ++ BIT((svc_id)%(sizeof(u32))))
  24627. ++
  24628. ++#define SVCMAP(x, y, len) \
  24629. ++ do { \
  24630. ++ if (WMI_SERVICE_IS_ENABLED((in), (x), (len))) \
  24631. ++ __set_bit(y, out); \
  24632. ++ } while (0)
  24633. ++
  24634. ++static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out,
  24635. ++ size_t len)
  24636. ++{
  24637. ++ SVCMAP(WMI_10X_SERVICE_BEACON_OFFLOAD,
  24638. ++ WMI_SERVICE_BEACON_OFFLOAD, len);
  24639. ++ SVCMAP(WMI_10X_SERVICE_SCAN_OFFLOAD,
  24640. ++ WMI_SERVICE_SCAN_OFFLOAD, len);
  24641. ++ SVCMAP(WMI_10X_SERVICE_ROAM_OFFLOAD,
  24642. ++ WMI_SERVICE_ROAM_OFFLOAD, len);
  24643. ++ SVCMAP(WMI_10X_SERVICE_BCN_MISS_OFFLOAD,
  24644. ++ WMI_SERVICE_BCN_MISS_OFFLOAD, len);
  24645. ++ SVCMAP(WMI_10X_SERVICE_STA_PWRSAVE,
  24646. ++ WMI_SERVICE_STA_PWRSAVE, len);
  24647. ++ SVCMAP(WMI_10X_SERVICE_STA_ADVANCED_PWRSAVE,
  24648. ++ WMI_SERVICE_STA_ADVANCED_PWRSAVE, len);
  24649. ++ SVCMAP(WMI_10X_SERVICE_AP_UAPSD,
  24650. ++ WMI_SERVICE_AP_UAPSD, len);
  24651. ++ SVCMAP(WMI_10X_SERVICE_AP_DFS,
  24652. ++ WMI_SERVICE_AP_DFS, len);
  24653. ++ SVCMAP(WMI_10X_SERVICE_11AC,
  24654. ++ WMI_SERVICE_11AC, len);
  24655. ++ SVCMAP(WMI_10X_SERVICE_BLOCKACK,
  24656. ++ WMI_SERVICE_BLOCKACK, len);
  24657. ++ SVCMAP(WMI_10X_SERVICE_PHYERR,
  24658. ++ WMI_SERVICE_PHYERR, len);
  24659. ++ SVCMAP(WMI_10X_SERVICE_BCN_FILTER,
  24660. ++ WMI_SERVICE_BCN_FILTER, len);
  24661. ++ SVCMAP(WMI_10X_SERVICE_RTT,
  24662. ++ WMI_SERVICE_RTT, len);
  24663. ++ SVCMAP(WMI_10X_SERVICE_RATECTRL,
  24664. ++ WMI_SERVICE_RATECTRL, len);
  24665. ++ SVCMAP(WMI_10X_SERVICE_WOW,
  24666. ++ WMI_SERVICE_WOW, len);
  24667. ++ SVCMAP(WMI_10X_SERVICE_RATECTRL_CACHE,
  24668. ++ WMI_SERVICE_RATECTRL_CACHE, len);
  24669. ++ SVCMAP(WMI_10X_SERVICE_IRAM_TIDS,
  24670. ++ WMI_SERVICE_IRAM_TIDS, len);
  24671. ++ SVCMAP(WMI_10X_SERVICE_BURST,
  24672. ++ WMI_SERVICE_BURST, len);
  24673. ++ SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_SW_SUPPORT,
  24674. ++ WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT, len);
  24675. ++ SVCMAP(WMI_10X_SERVICE_FORCE_FW_HANG,
  24676. ++ WMI_SERVICE_FORCE_FW_HANG, len);
  24677. ++ SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT,
  24678. ++ WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, len);
  24679. ++}
  24680. ++
  24681. ++static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out,
  24682. ++ size_t len)
  24683. ++{
  24684. ++ SVCMAP(WMI_MAIN_SERVICE_BEACON_OFFLOAD,
  24685. ++ WMI_SERVICE_BEACON_OFFLOAD, len);
  24686. ++ SVCMAP(WMI_MAIN_SERVICE_SCAN_OFFLOAD,
  24687. ++ WMI_SERVICE_SCAN_OFFLOAD, len);
  24688. ++ SVCMAP(WMI_MAIN_SERVICE_ROAM_OFFLOAD,
  24689. ++ WMI_SERVICE_ROAM_OFFLOAD, len);
  24690. ++ SVCMAP(WMI_MAIN_SERVICE_BCN_MISS_OFFLOAD,
  24691. ++ WMI_SERVICE_BCN_MISS_OFFLOAD, len);
  24692. ++ SVCMAP(WMI_MAIN_SERVICE_STA_PWRSAVE,
  24693. ++ WMI_SERVICE_STA_PWRSAVE, len);
  24694. ++ SVCMAP(WMI_MAIN_SERVICE_STA_ADVANCED_PWRSAVE,
  24695. ++ WMI_SERVICE_STA_ADVANCED_PWRSAVE, len);
  24696. ++ SVCMAP(WMI_MAIN_SERVICE_AP_UAPSD,
  24697. ++ WMI_SERVICE_AP_UAPSD, len);
  24698. ++ SVCMAP(WMI_MAIN_SERVICE_AP_DFS,
  24699. ++ WMI_SERVICE_AP_DFS, len);
  24700. ++ SVCMAP(WMI_MAIN_SERVICE_11AC,
  24701. ++ WMI_SERVICE_11AC, len);
  24702. ++ SVCMAP(WMI_MAIN_SERVICE_BLOCKACK,
  24703. ++ WMI_SERVICE_BLOCKACK, len);
  24704. ++ SVCMAP(WMI_MAIN_SERVICE_PHYERR,
  24705. ++ WMI_SERVICE_PHYERR, len);
  24706. ++ SVCMAP(WMI_MAIN_SERVICE_BCN_FILTER,
  24707. ++ WMI_SERVICE_BCN_FILTER, len);
  24708. ++ SVCMAP(WMI_MAIN_SERVICE_RTT,
  24709. ++ WMI_SERVICE_RTT, len);
  24710. ++ SVCMAP(WMI_MAIN_SERVICE_RATECTRL,
  24711. ++ WMI_SERVICE_RATECTRL, len);
  24712. ++ SVCMAP(WMI_MAIN_SERVICE_WOW,
  24713. ++ WMI_SERVICE_WOW, len);
  24714. ++ SVCMAP(WMI_MAIN_SERVICE_RATECTRL_CACHE,
  24715. ++ WMI_SERVICE_RATECTRL_CACHE, len);
  24716. ++ SVCMAP(WMI_MAIN_SERVICE_IRAM_TIDS,
  24717. ++ WMI_SERVICE_IRAM_TIDS, len);
  24718. ++ SVCMAP(WMI_MAIN_SERVICE_ARPNS_OFFLOAD,
  24719. ++ WMI_SERVICE_ARPNS_OFFLOAD, len);
  24720. ++ SVCMAP(WMI_MAIN_SERVICE_NLO,
  24721. ++ WMI_SERVICE_NLO, len);
  24722. ++ SVCMAP(WMI_MAIN_SERVICE_GTK_OFFLOAD,
  24723. ++ WMI_SERVICE_GTK_OFFLOAD, len);
  24724. ++ SVCMAP(WMI_MAIN_SERVICE_SCAN_SCH,
  24725. ++ WMI_SERVICE_SCAN_SCH, len);
  24726. ++ SVCMAP(WMI_MAIN_SERVICE_CSA_OFFLOAD,
  24727. ++ WMI_SERVICE_CSA_OFFLOAD, len);
  24728. ++ SVCMAP(WMI_MAIN_SERVICE_CHATTER,
  24729. ++ WMI_SERVICE_CHATTER, len);
  24730. ++ SVCMAP(WMI_MAIN_SERVICE_COEX_FREQAVOID,
  24731. ++ WMI_SERVICE_COEX_FREQAVOID, len);
  24732. ++ SVCMAP(WMI_MAIN_SERVICE_PACKET_POWER_SAVE,
  24733. ++ WMI_SERVICE_PACKET_POWER_SAVE, len);
  24734. ++ SVCMAP(WMI_MAIN_SERVICE_FORCE_FW_HANG,
  24735. ++ WMI_SERVICE_FORCE_FW_HANG, len);
  24736. ++ SVCMAP(WMI_MAIN_SERVICE_GPIO,
  24737. ++ WMI_SERVICE_GPIO, len);
  24738. ++ SVCMAP(WMI_MAIN_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
  24739. ++ WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, len);
  24740. ++ SVCMAP(WMI_MAIN_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
  24741. ++ WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, len);
  24742. ++ SVCMAP(WMI_MAIN_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
  24743. ++ WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, len);
  24744. ++ SVCMAP(WMI_MAIN_SERVICE_STA_KEEP_ALIVE,
  24745. ++ WMI_SERVICE_STA_KEEP_ALIVE, len);
  24746. ++ SVCMAP(WMI_MAIN_SERVICE_TX_ENCAP,
  24747. ++ WMI_SERVICE_TX_ENCAP, len);
  24748. ++}
  24749. +
  24750. +-#define WMI_SERVICE_BM_SIZE \
  24751. +- ((WMI_MAX_SERVICE + sizeof(u32) - 1)/sizeof(u32))
  24752. ++#undef SVCMAP
  24753. +
  24754. + /* 2 word representation of MAC addr */
  24755. + struct wmi_mac_addr {
  24756. +@@ -308,6 +550,8 @@ struct wmi_cmd_map {
  24757. + u32 force_fw_hang_cmdid;
  24758. + u32 gpio_config_cmdid;
  24759. + u32 gpio_output_cmdid;
  24760. ++ u32 pdev_get_temperature_cmdid;
  24761. ++ u32 vdev_set_wmm_params_cmdid;
  24762. + };
  24763. +
  24764. + /*
  24765. +@@ -803,6 +1047,166 @@ enum wmi_10x_event_id {
  24766. + WMI_10X_PDEV_UTF_EVENTID = WMI_10X_END_EVENTID-1,
  24767. + };
  24768. +
  24769. ++enum wmi_10_2_cmd_id {
  24770. ++ WMI_10_2_START_CMDID = 0x9000,
  24771. ++ WMI_10_2_END_CMDID = 0x9FFF,
  24772. ++ WMI_10_2_INIT_CMDID,
  24773. ++ WMI_10_2_START_SCAN_CMDID = WMI_10_2_START_CMDID,
  24774. ++ WMI_10_2_STOP_SCAN_CMDID,
  24775. ++ WMI_10_2_SCAN_CHAN_LIST_CMDID,
  24776. ++ WMI_10_2_ECHO_CMDID,
  24777. ++ WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
  24778. ++ WMI_10_2_PDEV_SET_CHANNEL_CMDID,
  24779. ++ WMI_10_2_PDEV_SET_PARAM_CMDID,
  24780. ++ WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
  24781. ++ WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
  24782. ++ WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
  24783. ++ WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
  24784. ++ WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
  24785. ++ WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
  24786. ++ WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
  24787. ++ WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
  24788. ++ WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
  24789. ++ WMI_10_2_VDEV_CREATE_CMDID,
  24790. ++ WMI_10_2_VDEV_DELETE_CMDID,
  24791. ++ WMI_10_2_VDEV_START_REQUEST_CMDID,
  24792. ++ WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
  24793. ++ WMI_10_2_VDEV_UP_CMDID,
  24794. ++ WMI_10_2_VDEV_STOP_CMDID,
  24795. ++ WMI_10_2_VDEV_DOWN_CMDID,
  24796. ++ WMI_10_2_VDEV_STANDBY_RESPONSE_CMDID,
  24797. ++ WMI_10_2_VDEV_RESUME_RESPONSE_CMDID,
  24798. ++ WMI_10_2_VDEV_SET_PARAM_CMDID,
  24799. ++ WMI_10_2_VDEV_INSTALL_KEY_CMDID,
  24800. ++ WMI_10_2_VDEV_SET_DSCP_TID_MAP_CMDID,
  24801. ++ WMI_10_2_PEER_CREATE_CMDID,
  24802. ++ WMI_10_2_PEER_DELETE_CMDID,
  24803. ++ WMI_10_2_PEER_FLUSH_TIDS_CMDID,
  24804. ++ WMI_10_2_PEER_SET_PARAM_CMDID,
  24805. ++ WMI_10_2_PEER_ASSOC_CMDID,
  24806. ++ WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
  24807. ++ WMI_10_2_PEER_UPDATE_WDS_ENTRY_CMDID,
  24808. ++ WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
  24809. ++ WMI_10_2_PEER_MCAST_GROUP_CMDID,
  24810. ++ WMI_10_2_BCN_TX_CMDID,
  24811. ++ WMI_10_2_BCN_PRB_TMPL_CMDID,
  24812. ++ WMI_10_2_BCN_FILTER_RX_CMDID,
  24813. ++ WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
  24814. ++ WMI_10_2_MGMT_TX_CMDID,
  24815. ++ WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
  24816. ++ WMI_10_2_ADDBA_SEND_CMDID,
  24817. ++ WMI_10_2_ADDBA_STATUS_CMDID,
  24818. ++ WMI_10_2_DELBA_SEND_CMDID,
  24819. ++ WMI_10_2_ADDBA_SET_RESP_CMDID,
  24820. ++ WMI_10_2_SEND_SINGLEAMSDU_CMDID,
  24821. ++ WMI_10_2_STA_POWERSAVE_MODE_CMDID,
  24822. ++ WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
  24823. ++ WMI_10_2_STA_MIMO_PS_MODE_CMDID,
  24824. ++ WMI_10_2_DBGLOG_CFG_CMDID,
  24825. ++ WMI_10_2_PDEV_DFS_ENABLE_CMDID,
  24826. ++ WMI_10_2_PDEV_DFS_DISABLE_CMDID,
  24827. ++ WMI_10_2_PDEV_QVIT_CMDID,
  24828. ++ WMI_10_2_ROAM_SCAN_MODE,
  24829. ++ WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
  24830. ++ WMI_10_2_ROAM_SCAN_PERIOD,
  24831. ++ WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
  24832. ++ WMI_10_2_ROAM_AP_PROFILE,
  24833. ++ WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
  24834. ++ WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
  24835. ++ WMI_10_2_OFL_SCAN_PERIOD,
  24836. ++ WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
  24837. ++ WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
  24838. ++ WMI_10_2_P2P_GO_SET_BEACON_IE,
  24839. ++ WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
  24840. ++ WMI_10_2_AP_PS_PEER_PARAM_CMDID,
  24841. ++ WMI_10_2_AP_PS_PEER_UAPSD_COEX_CMDID,
  24842. ++ WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
  24843. ++ WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
  24844. ++ WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
  24845. ++ WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
  24846. ++ WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
  24847. ++ WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
  24848. ++ WMI_10_2_PDEV_SUSPEND_CMDID,
  24849. ++ WMI_10_2_PDEV_RESUME_CMDID,
  24850. ++ WMI_10_2_ADD_BCN_FILTER_CMDID,
  24851. ++ WMI_10_2_RMV_BCN_FILTER_CMDID,
  24852. ++ WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
  24853. ++ WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
  24854. ++ WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
  24855. ++ WMI_10_2_WOW_ENABLE_CMDID,
  24856. ++ WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
  24857. ++ WMI_10_2_RTT_MEASREQ_CMDID,
  24858. ++ WMI_10_2_RTT_TSF_CMDID,
  24859. ++ WMI_10_2_RTT_KEEPALIVE_CMDID,
  24860. ++ WMI_10_2_PDEV_SEND_BCN_CMDID,
  24861. ++ WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
  24862. ++ WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
  24863. ++ WMI_10_2_REQUEST_STATS_CMDID,
  24864. ++ WMI_10_2_GPIO_CONFIG_CMDID,
  24865. ++ WMI_10_2_GPIO_OUTPUT_CMDID,
  24866. ++ WMI_10_2_VDEV_RATEMASK_CMDID,
  24867. ++ WMI_10_2_PDEV_SMART_ANT_ENABLE_CMDID,
  24868. ++ WMI_10_2_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
  24869. ++ WMI_10_2_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
  24870. ++ WMI_10_2_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
  24871. ++ WMI_10_2_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
  24872. ++ WMI_10_2_FORCE_FW_HANG_CMDID,
  24873. ++ WMI_10_2_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
  24874. ++ WMI_10_2_PDEV_SET_CTL_TABLE_CMDID,
  24875. ++ WMI_10_2_PDEV_SET_MIMOGAIN_TABLE_CMDID,
  24876. ++ WMI_10_2_PDEV_RATEPWR_TABLE_CMDID,
  24877. ++ WMI_10_2_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
  24878. ++ WMI_10_2_PDEV_GET_INFO,
  24879. ++ WMI_10_2_VDEV_GET_INFO,
  24880. ++ WMI_10_2_VDEV_ATF_REQUEST_CMDID,
  24881. ++ WMI_10_2_PEER_ATF_REQUEST_CMDID,
  24882. ++ WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
  24883. ++ WMI_10_2_PDEV_UTF_CMDID = WMI_10_2_END_CMDID - 1,
  24884. ++};
  24885. ++
  24886. ++enum wmi_10_2_event_id {
  24887. ++ WMI_10_2_SERVICE_READY_EVENTID = 0x8000,
  24888. ++ WMI_10_2_READY_EVENTID,
  24889. ++ WMI_10_2_DEBUG_MESG_EVENTID,
  24890. ++ WMI_10_2_START_EVENTID = 0x9000,
  24891. ++ WMI_10_2_END_EVENTID = 0x9FFF,
  24892. ++ WMI_10_2_SCAN_EVENTID = WMI_10_2_START_EVENTID,
  24893. ++ WMI_10_2_ECHO_EVENTID,
  24894. ++ WMI_10_2_UPDATE_STATS_EVENTID,
  24895. ++ WMI_10_2_INST_RSSI_STATS_EVENTID,
  24896. ++ WMI_10_2_VDEV_START_RESP_EVENTID,
  24897. ++ WMI_10_2_VDEV_STANDBY_REQ_EVENTID,
  24898. ++ WMI_10_2_VDEV_RESUME_REQ_EVENTID,
  24899. ++ WMI_10_2_VDEV_STOPPED_EVENTID,
  24900. ++ WMI_10_2_PEER_STA_KICKOUT_EVENTID,
  24901. ++ WMI_10_2_HOST_SWBA_EVENTID,
  24902. ++ WMI_10_2_TBTTOFFSET_UPDATE_EVENTID,
  24903. ++ WMI_10_2_MGMT_RX_EVENTID,
  24904. ++ WMI_10_2_CHAN_INFO_EVENTID,
  24905. ++ WMI_10_2_PHYERR_EVENTID,
  24906. ++ WMI_10_2_ROAM_EVENTID,
  24907. ++ WMI_10_2_PROFILE_MATCH,
  24908. ++ WMI_10_2_DEBUG_PRINT_EVENTID,
  24909. ++ WMI_10_2_PDEV_QVIT_EVENTID,
  24910. ++ WMI_10_2_WLAN_PROFILE_DATA_EVENTID,
  24911. ++ WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID,
  24912. ++ WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID,
  24913. ++ WMI_10_2_RTT_ERROR_REPORT_EVENTID,
  24914. ++ WMI_10_2_RTT_KEEPALIVE_EVENTID,
  24915. ++ WMI_10_2_WOW_WAKEUP_HOST_EVENTID,
  24916. ++ WMI_10_2_DCS_INTERFERENCE_EVENTID,
  24917. ++ WMI_10_2_PDEV_TPC_CONFIG_EVENTID,
  24918. ++ WMI_10_2_GPIO_INPUT_EVENTID,
  24919. ++ WMI_10_2_PEER_RATECODE_LIST_EVENTID,
  24920. ++ WMI_10_2_GENERIC_BUFFER_EVENTID,
  24921. ++ WMI_10_2_MCAST_BUF_RELEASE_EVENTID,
  24922. ++ WMI_10_2_MCAST_LIST_AGEOUT_EVENTID,
  24923. ++ WMI_10_2_WDS_PEER_EVENTID,
  24924. ++ WMI_10_2_PEER_STA_PS_STATECHG_EVENTID,
  24925. ++ WMI_10_2_PDEV_TEMPERATURE_EVENTID,
  24926. ++ WMI_10_2_PDEV_UTF_EVENTID = WMI_10_2_END_EVENTID - 1,
  24927. ++};
  24928. ++
  24929. + enum wmi_phy_mode {
  24930. + MODE_11A = 0, /* 11a Mode */
  24931. + MODE_11G = 1, /* 11b/g Mode */
  24932. +@@ -955,7 +1359,6 @@ enum wmi_channel_change_cause {
  24933. + WMI_HT_CAP_RX_STBC | \
  24934. + WMI_HT_CAP_LDPC)
  24935. +
  24936. +-
  24937. + /*
  24938. + * WMI_VHT_CAP_* these maps to ieee 802.11ac vht capability information
  24939. + * field. The fields not defined here are not supported, or reserved.
  24940. +@@ -1076,10 +1479,6 @@ struct wlan_host_mem_req {
  24941. + __le32 num_units;
  24942. + } __packed;
  24943. +
  24944. +-#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id) \
  24945. +- ((((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \
  24946. +- (1 << ((svc_id)%(sizeof(u32))))) != 0)
  24947. +-
  24948. + /*
  24949. + * The following struct holds optional payload for
  24950. + * wmi_service_ready_event,e.g., 11ac pass some of the
  24951. +@@ -1093,7 +1492,7 @@ struct wmi_service_ready_event {
  24952. + __le32 phy_capability;
  24953. + /* Maximum number of frag table entries that SW will populate less 1 */
  24954. + __le32 max_frag_entry;
  24955. +- __le32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
  24956. ++ __le32 wmi_service_bitmap[16];
  24957. + __le32 num_rf_chains;
  24958. + /*
  24959. + * The following field is only valid for service type
  24960. +@@ -1119,11 +1518,11 @@ struct wmi_service_ready_event {
  24961. + * where FW can access this memory directly (or) by DMA.
  24962. + */
  24963. + __le32 num_mem_reqs;
  24964. +- struct wlan_host_mem_req mem_reqs[1];
  24965. ++ struct wlan_host_mem_req mem_reqs[0];
  24966. + } __packed;
  24967. +
  24968. + /* This is the definition from 10.X firmware branch */
  24969. +-struct wmi_service_ready_event_10x {
  24970. ++struct wmi_10x_service_ready_event {
  24971. + __le32 sw_version;
  24972. + __le32 abi_version;
  24973. +
  24974. +@@ -1132,7 +1531,7 @@ struct wmi_service_ready_event_10x {
  24975. +
  24976. + /* Maximum number of frag table entries that SW will populate less 1 */
  24977. + __le32 max_frag_entry;
  24978. +- __le32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
  24979. ++ __le32 wmi_service_bitmap[16];
  24980. + __le32 num_rf_chains;
  24981. +
  24982. + /*
  24983. +@@ -1158,10 +1557,9 @@ struct wmi_service_ready_event_10x {
  24984. + */
  24985. + __le32 num_mem_reqs;
  24986. +
  24987. +- struct wlan_host_mem_req mem_reqs[1];
  24988. ++ struct wlan_host_mem_req mem_reqs[0];
  24989. + } __packed;
  24990. +
  24991. +-
  24992. + #define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ)
  24993. + #define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ)
  24994. +
  24995. +@@ -1255,7 +1653,7 @@ struct wmi_resource_config {
  24996. + */
  24997. + __le32 rx_decap_mode;
  24998. +
  24999. +- /* what is the maximum scan requests than can be queued */
  25000. ++ /* what is the maximum number of scan requests that can be queued */
  25001. + __le32 scan_max_pending_reqs;
  25002. +
  25003. + /* maximum VDEV that could use BMISS offload */
  25004. +@@ -1440,7 +1838,7 @@ struct wmi_resource_config_10x {
  25005. + */
  25006. + __le32 rx_decap_mode;
  25007. +
  25008. +- /* what is the maximum scan requests than can be queued */
  25009. ++ /* what is the maximum number of scan requests that can be queued */
  25010. + __le32 scan_max_pending_reqs;
  25011. +
  25012. + /* maximum VDEV that could use BMISS offload */
  25013. +@@ -1551,6 +1949,21 @@ struct wmi_resource_config_10x {
  25014. + __le32 max_frag_entries;
  25015. + } __packed;
  25016. +
  25017. ++enum wmi_10_2_feature_mask {
  25018. ++ WMI_10_2_RX_BATCH_MODE = BIT(0),
  25019. ++ WMI_10_2_ATF_CONFIG = BIT(1),
  25020. ++};
  25021. ++
  25022. ++struct wmi_resource_config_10_2 {
  25023. ++ struct wmi_resource_config_10x common;
  25024. ++ __le32 max_peer_ext_stats;
  25025. ++ __le32 smart_ant_cap; /* 0-disable, 1-enable */
  25026. ++ __le32 bk_min_free;
  25027. ++ __le32 be_min_free;
  25028. ++ __le32 vi_min_free;
  25029. ++ __le32 vo_min_free;
  25030. ++ __le32 feature_mask;
  25031. ++} __packed;
  25032. +
  25033. + #define NUM_UNITS_IS_NUM_VDEVS 0x1
  25034. + #define NUM_UNITS_IS_NUM_PEERS 0x2
  25035. +@@ -1565,34 +1978,39 @@ struct host_memory_chunk {
  25036. + __le32 size;
  25037. + } __packed;
  25038. +
  25039. ++struct wmi_host_mem_chunks {
  25040. ++ __le32 count;
  25041. ++ /* some fw revisions require at least 1 chunk regardless of count */
  25042. ++ struct host_memory_chunk items[1];
  25043. ++} __packed;
  25044. ++
  25045. + struct wmi_init_cmd {
  25046. + struct wmi_resource_config resource_config;
  25047. +- __le32 num_host_mem_chunks;
  25048. +-
  25049. +- /*
  25050. +- * variable number of host memory chunks.
  25051. +- * This should be the last element in the structure
  25052. +- */
  25053. +- struct host_memory_chunk host_mem_chunks[1];
  25054. ++ struct wmi_host_mem_chunks mem_chunks;
  25055. + } __packed;
  25056. +
  25057. + /* _10x stucture is from 10.X FW API */
  25058. + struct wmi_init_cmd_10x {
  25059. + struct wmi_resource_config_10x resource_config;
  25060. +- __le32 num_host_mem_chunks;
  25061. ++ struct wmi_host_mem_chunks mem_chunks;
  25062. ++} __packed;
  25063. +
  25064. +- /*
  25065. +- * variable number of host memory chunks.
  25066. +- * This should be the last element in the structure
  25067. +- */
  25068. +- struct host_memory_chunk host_mem_chunks[1];
  25069. ++struct wmi_init_cmd_10_2 {
  25070. ++ struct wmi_resource_config_10_2 resource_config;
  25071. ++ struct wmi_host_mem_chunks mem_chunks;
  25072. ++} __packed;
  25073. ++
  25074. ++struct wmi_chan_list_entry {
  25075. ++ __le16 freq;
  25076. ++ u8 phy_mode; /* valid for 10.2 only */
  25077. ++ u8 reserved;
  25078. + } __packed;
  25079. +
  25080. + /* TLV for channel list */
  25081. + struct wmi_chan_list {
  25082. + __le32 tag; /* WMI_CHAN_LIST_TAG */
  25083. + __le32 num_chan;
  25084. +- __le32 channel_list[0];
  25085. ++ struct wmi_chan_list_entry channel_list[0];
  25086. + } __packed;
  25087. +
  25088. + struct wmi_bssid_list {
  25089. +@@ -1629,6 +2047,11 @@ struct wmi_ssid_list {
  25090. + #define WLAN_SCAN_PARAMS_MAX_BSSID 4
  25091. + #define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
  25092. +
  25093. ++/* Values lower than this may be refused by some firmware revisions with a scan
  25094. ++ * completion with a timedout reason.
  25095. ++ */
  25096. ++#define WMI_SCAN_CHAN_MIN_TIME_MSEC 40
  25097. ++
  25098. + /* Scan priority numbers must be sequential, starting with 0 */
  25099. + enum wmi_scan_priority {
  25100. + WMI_SCAN_PRIORITY_VERY_LOW = 0,
  25101. +@@ -1639,7 +2062,7 @@ enum wmi_scan_priority {
  25102. + WMI_SCAN_PRIORITY_COUNT /* number of priorities supported */
  25103. + };
  25104. +
  25105. +-struct wmi_start_scan_cmd {
  25106. ++struct wmi_start_scan_common {
  25107. + /* Scan ID */
  25108. + __le32 scan_id;
  25109. + /* Scan requestor ID */
  25110. +@@ -1697,97 +2120,26 @@ struct wmi_start_scan_cmd {
  25111. + __le32 probe_delay;
  25112. + /* Scan control flags */
  25113. + __le32 scan_ctrl_flags;
  25114. +-
  25115. +- /* Burst duration time in msecs */
  25116. +- __le32 burst_duration;
  25117. +- /*
  25118. +- * TLV (tag length value ) paramerters follow the scan_cmd structure.
  25119. +- * TLV can contain channel list, bssid list, ssid list and
  25120. +- * ie. the TLV tags are defined above;
  25121. +- */
  25122. + } __packed;
  25123. +
  25124. +-/* This is the definition from 10.X firmware branch */
  25125. +-struct wmi_start_scan_cmd_10x {
  25126. +- /* Scan ID */
  25127. +- __le32 scan_id;
  25128. +-
  25129. +- /* Scan requestor ID */
  25130. +- __le32 scan_req_id;
  25131. +-
  25132. +- /* VDEV id(interface) that is requesting scan */
  25133. +- __le32 vdev_id;
  25134. +-
  25135. +- /* Scan Priority, input to scan scheduler */
  25136. +- __le32 scan_priority;
  25137. +-
  25138. +- /* Scan events subscription */
  25139. +- __le32 notify_scan_events;
  25140. +-
  25141. +- /* dwell time in msec on active channels */
  25142. +- __le32 dwell_time_active;
  25143. +-
  25144. +- /* dwell time in msec on passive channels */
  25145. +- __le32 dwell_time_passive;
  25146. +-
  25147. +- /*
  25148. +- * min time in msec on the BSS channel,only valid if atleast one
  25149. +- * VDEV is active
  25150. +- */
  25151. +- __le32 min_rest_time;
  25152. +-
  25153. +- /*
  25154. +- * max rest time in msec on the BSS channel,only valid if at least
  25155. +- * one VDEV is active
  25156. +- */
  25157. +- /*
  25158. +- * the scanner will rest on the bss channel at least min_rest_time
  25159. +- * after min_rest_time the scanner will start checking for tx/rx
  25160. +- * activity on all VDEVs. if there is no activity the scanner will
  25161. +- * switch to off channel. if there is activity the scanner will let
  25162. +- * the radio on the bss channel until max_rest_time expires.at
  25163. +- * max_rest_time scanner will switch to off channel irrespective of
  25164. +- * activity. activity is determined by the idle_time parameter.
  25165. +- */
  25166. +- __le32 max_rest_time;
  25167. +-
  25168. +- /*
  25169. +- * time before sending next set of probe requests.
  25170. +- * The scanner keeps repeating probe requests transmission with
  25171. +- * period specified by repeat_probe_time.
  25172. +- * The number of probe requests specified depends on the ssid_list
  25173. +- * and bssid_list
  25174. +- */
  25175. +- __le32 repeat_probe_time;
  25176. +-
  25177. +- /* time in msec between 2 consequetive probe requests with in a set. */
  25178. +- __le32 probe_spacing_time;
  25179. +-
  25180. +- /*
  25181. +- * data inactivity time in msec on bss channel that will be used by
  25182. +- * scanner for measuring the inactivity.
  25183. ++struct wmi_start_scan_tlvs {
  25184. ++ /* TLV parameters. These includes channel list, ssid list, bssid list,
  25185. ++ * extra ies.
  25186. + */
  25187. +- __le32 idle_time;
  25188. +-
  25189. +- /* maximum time in msec allowed for scan */
  25190. +- __le32 max_scan_time;
  25191. +-
  25192. +- /*
  25193. +- * delay in msec before sending first probe request after switching
  25194. +- * to a channel
  25195. +- */
  25196. +- __le32 probe_delay;
  25197. +-
  25198. +- /* Scan control flags */
  25199. +- __le32 scan_ctrl_flags;
  25200. ++ u8 tlvs[0];
  25201. ++} __packed;
  25202. +
  25203. +- /*
  25204. +- * TLV (tag length value ) paramerters follow the scan_cmd structure.
  25205. +- * TLV can contain channel list, bssid list, ssid list and
  25206. +- * ie. the TLV tags are defined above;
  25207. +- */
  25208. ++struct wmi_start_scan_cmd {
  25209. ++ struct wmi_start_scan_common common;
  25210. ++ __le32 burst_duration_ms;
  25211. ++ struct wmi_start_scan_tlvs tlvs;
  25212. + } __packed;
  25213. +
  25214. ++/* This is the definition from 10.X firmware branch */
  25215. ++struct wmi_10x_start_scan_cmd {
  25216. ++ struct wmi_start_scan_common common;
  25217. ++ struct wmi_start_scan_tlvs tlvs;
  25218. ++} __packed;
  25219. +
  25220. + struct wmi_ssid_arg {
  25221. + int len;
  25222. +@@ -1821,7 +2173,7 @@ struct wmi_start_scan_arg {
  25223. + u32 n_bssids;
  25224. +
  25225. + u8 ie[WLAN_SCAN_PARAMS_MAX_IE_LEN];
  25226. +- u32 channels[64];
  25227. ++ u16 channels[64];
  25228. + struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID];
  25229. + struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID];
  25230. + };
  25231. +@@ -1849,7 +2201,6 @@ struct wmi_start_scan_arg {
  25232. + /* WMI_SCAN_CLASS_MASK must be the same value as IEEE80211_SCAN_CLASS_MASK */
  25233. + #define WMI_SCAN_CLASS_MASK 0xFF000000
  25234. +
  25235. +-
  25236. + enum wmi_stop_scan_type {
  25237. + WMI_SCAN_STOP_ONE = 0x00000000, /* stop by scan_id */
  25238. + WMI_SCAN_STOP_VDEV_ALL = 0x01000000, /* stop by vdev_id */
  25239. +@@ -1973,100 +2324,31 @@ struct wmi_mgmt_rx_event_v2 {
  25240. + #define PHY_ERROR_FALSE_RADAR_EXT 0x24
  25241. + #define PHY_ERROR_RADAR 0x05
  25242. +
  25243. +-struct wmi_single_phyerr_rx_hdr {
  25244. +- /* TSF timestamp */
  25245. ++struct wmi_phyerr {
  25246. + __le32 tsf_timestamp;
  25247. +-
  25248. +- /*
  25249. +- * Current freq1, freq2
  25250. +- *
  25251. +- * [7:0]: freq1[lo]
  25252. +- * [15:8] : freq1[hi]
  25253. +- * [23:16]: freq2[lo]
  25254. +- * [31:24]: freq2[hi]
  25255. +- */
  25256. + __le16 freq1;
  25257. + __le16 freq2;
  25258. +-
  25259. +- /*
  25260. +- * Combined RSSI over all chains and channel width for this PHY error
  25261. +- *
  25262. +- * [7:0]: RSSI combined
  25263. +- * [15:8]: Channel width (MHz)
  25264. +- * [23:16]: PHY error code
  25265. +- * [24:16]: reserved (future use)
  25266. +- */
  25267. + u8 rssi_combined;
  25268. + u8 chan_width_mhz;
  25269. + u8 phy_err_code;
  25270. + u8 rsvd0;
  25271. +-
  25272. +- /*
  25273. +- * RSSI on chain 0 through 3
  25274. +- *
  25275. +- * This is formatted the same as the PPDU_START RX descriptor
  25276. +- * field:
  25277. +- *
  25278. +- * [7:0]: pri20
  25279. +- * [15:8]: sec20
  25280. +- * [23:16]: sec40
  25281. +- * [31:24]: sec80
  25282. +- */
  25283. +-
  25284. +- __le32 rssi_chain0;
  25285. +- __le32 rssi_chain1;
  25286. +- __le32 rssi_chain2;
  25287. +- __le32 rssi_chain3;
  25288. +-
  25289. +- /*
  25290. +- * Last calibrated NF value for chain 0 through 3
  25291. +- *
  25292. +- * nf_list_1:
  25293. +- *
  25294. +- * + [15:0] - chain 0
  25295. +- * + [31:16] - chain 1
  25296. +- *
  25297. +- * nf_list_2:
  25298. +- *
  25299. +- * + [15:0] - chain 2
  25300. +- * + [31:16] - chain 3
  25301. +- */
  25302. +- __le32 nf_list_1;
  25303. +- __le32 nf_list_2;
  25304. +-
  25305. +-
  25306. +- /* Length of the frame */
  25307. ++ __le32 rssi_chains[4];
  25308. ++ __le16 nf_chains[4];
  25309. + __le32 buf_len;
  25310. ++ u8 buf[0];
  25311. + } __packed;
  25312. +
  25313. +-struct wmi_single_phyerr_rx_event {
  25314. +- /* Phy error event header */
  25315. +- struct wmi_single_phyerr_rx_hdr hdr;
  25316. +- /* frame buffer */
  25317. +- u8 bufp[0];
  25318. +-} __packed;
  25319. +-
  25320. +-struct wmi_comb_phyerr_rx_hdr {
  25321. +- /* Phy error phy error count */
  25322. +- __le32 num_phyerr_events;
  25323. ++struct wmi_phyerr_event {
  25324. ++ __le32 num_phyerrs;
  25325. + __le32 tsf_l32;
  25326. + __le32 tsf_u32;
  25327. +-} __packed;
  25328. +-
  25329. +-struct wmi_comb_phyerr_rx_event {
  25330. +- /* Phy error phy error count */
  25331. +- struct wmi_comb_phyerr_rx_hdr hdr;
  25332. +- /*
  25333. +- * frame buffer - contains multiple payloads in the order:
  25334. +- * header - payload, header - payload...
  25335. +- * (The header is of type: wmi_single_phyerr_rx_hdr)
  25336. +- */
  25337. +- u8 bufp[0];
  25338. ++ struct wmi_phyerr phyerrs[0];
  25339. + } __packed;
  25340. +
  25341. + #define PHYERR_TLV_SIG 0xBB
  25342. + #define PHYERR_TLV_TAG_SEARCH_FFT_REPORT 0xFB
  25343. + #define PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY 0xF8
  25344. ++#define PHYERR_TLV_TAG_SPECTRAL_SUMMARY_REPORT 0xF9
  25345. +
  25346. + struct phyerr_radar_report {
  25347. + __le32 reg0; /* RADAR_REPORT_REG0_* */
  25348. +@@ -2135,7 +2417,6 @@ struct phyerr_fft_report {
  25349. + #define SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB_MASK 0x000000FF
  25350. + #define SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB_LSB 0
  25351. +
  25352. +-
  25353. + struct phyerr_tlv {
  25354. + __le16 len;
  25355. + u8 tag;
  25356. +@@ -2166,7 +2447,6 @@ struct wmi_echo_cmd {
  25357. + __le32 value;
  25358. + } __packed;
  25359. +
  25360. +-
  25361. + struct wmi_pdev_set_regdomain_cmd {
  25362. + __le32 reg_domain;
  25363. + __le32 reg_domain_2G;
  25364. +@@ -2215,7 +2495,6 @@ struct wmi_pdev_set_quiet_cmd {
  25365. + __le32 enabled;
  25366. + } __packed;
  25367. +
  25368. +-
  25369. + /*
  25370. + * 802.11g protection mode.
  25371. + */
  25372. +@@ -2318,14 +2597,15 @@ struct wmi_pdev_param_map {
  25373. + u32 fast_channel_reset;
  25374. + u32 burst_dur;
  25375. + u32 burst_enable;
  25376. ++ u32 cal_period;
  25377. + };
  25378. +
  25379. + #define WMI_PDEV_PARAM_UNSUPPORTED 0
  25380. +
  25381. + enum wmi_pdev_param {
  25382. +- /* TX chian mask */
  25383. ++ /* TX chain mask */
  25384. + WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
  25385. +- /* RX chian mask */
  25386. ++ /* RX chain mask */
  25387. + WMI_PDEV_PARAM_RX_CHAIN_MASK,
  25388. + /* TX power limit for 2G Radio */
  25389. + WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
  25390. +@@ -2515,6 +2795,22 @@ enum wmi_10x_pdev_param {
  25391. + WMI_10X_PDEV_PARAM_BURST_DUR,
  25392. + /* Set Bursting Enable*/
  25393. + WMI_10X_PDEV_PARAM_BURST_ENABLE,
  25394. ++
  25395. ++ /* following are available as of firmware 10.2 */
  25396. ++ WMI_10X_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
  25397. ++ WMI_10X_PDEV_PARAM_IGMPMLD_OVERRIDE,
  25398. ++ WMI_10X_PDEV_PARAM_IGMPMLD_TID,
  25399. ++ WMI_10X_PDEV_PARAM_ANTENNA_GAIN,
  25400. ++ WMI_10X_PDEV_PARAM_RX_DECAP_MODE,
  25401. ++ WMI_10X_PDEV_PARAM_RX_FILTER,
  25402. ++ WMI_10X_PDEV_PARAM_SET_MCAST_TO_UCAST_TID,
  25403. ++ WMI_10X_PDEV_PARAM_PROXY_STA_MODE,
  25404. ++ WMI_10X_PDEV_PARAM_SET_MCAST2UCAST_MODE,
  25405. ++ WMI_10X_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
  25406. ++ WMI_10X_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
  25407. ++ WMI_10X_PDEV_PARAM_PEER_STA_PS_STATECHG_ENABLE,
  25408. ++ WMI_10X_PDEV_PARAM_RTS_FIXED_RATE,
  25409. ++ WMI_10X_PDEV_PARAM_CAL_PERIOD
  25410. + };
  25411. +
  25412. + struct wmi_pdev_set_param_cmd {
  25413. +@@ -2522,6 +2818,9 @@ struct wmi_pdev_set_param_cmd {
  25414. + __le32 param_value;
  25415. + } __packed;
  25416. +
  25417. ++/* valid period is 1 ~ 60000ms, unit in millisecond */
  25418. ++#define WMI_PDEV_PARAM_CAL_PERIOD_MAX 60000
  25419. ++
  25420. + struct wmi_pdev_get_tpc_config_cmd {
  25421. + /* parameter */
  25422. + __le32 param;
  25423. +@@ -2565,11 +2864,6 @@ enum wmi_tp_scale {
  25424. + WMI_TP_SCALE_SIZE = 5, /* max num of enum */
  25425. + };
  25426. +
  25427. +-struct wmi_set_channel_cmd {
  25428. +- /* channel (only frequency and mode info are used) */
  25429. +- struct wmi_channel chan;
  25430. +-} __packed;
  25431. +-
  25432. + struct wmi_pdev_chanlist_update_event {
  25433. + /* number of channels */
  25434. + __le32 num_chan;
  25435. +@@ -2600,6 +2894,10 @@ struct wmi_pdev_set_channel_cmd {
  25436. + struct wmi_channel chan;
  25437. + } __packed;
  25438. +
  25439. ++struct wmi_pdev_pktlog_enable_cmd {
  25440. ++ __le32 ev_bitmap;
  25441. ++} __packed;
  25442. ++
  25443. + /* Customize the DSCP (bit) to TID (0-7) mapping for QOS */
  25444. + #define WMI_DSCP_MAP_MAX (64)
  25445. + struct wmi_pdev_set_dscp_tid_map_cmd {
  25446. +@@ -2642,14 +2940,14 @@ struct wmi_wmm_params_arg {
  25447. + u32 no_ack;
  25448. + };
  25449. +
  25450. +-struct wmi_pdev_set_wmm_params_arg {
  25451. ++struct wmi_wmm_params_all_arg {
  25452. + struct wmi_wmm_params_arg ac_be;
  25453. + struct wmi_wmm_params_arg ac_bk;
  25454. + struct wmi_wmm_params_arg ac_vi;
  25455. + struct wmi_wmm_params_arg ac_vo;
  25456. + };
  25457. +
  25458. +-struct wal_dbg_tx_stats {
  25459. ++struct wmi_pdev_stats_tx {
  25460. + /* Num HTT cookies queued to dispatch list */
  25461. + __le32 comp_queued;
  25462. +
  25463. +@@ -2719,7 +3017,7 @@ struct wal_dbg_tx_stats {
  25464. + __le32 txop_ovf;
  25465. + } __packed;
  25466. +
  25467. +-struct wal_dbg_rx_stats {
  25468. ++struct wmi_pdev_stats_rx {
  25469. + /* Cnts any change in ring routing mid-ppdu */
  25470. + __le32 mid_ppdu_route_change;
  25471. +
  25472. +@@ -2753,20 +3051,18 @@ struct wal_dbg_rx_stats {
  25473. + __le32 mpdu_errs;
  25474. + } __packed;
  25475. +
  25476. +-struct wal_dbg_peer_stats {
  25477. ++struct wmi_pdev_stats_peer {
  25478. + /* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */
  25479. + __le32 dummy;
  25480. + } __packed;
  25481. +
  25482. +-struct wal_dbg_stats {
  25483. +- struct wal_dbg_tx_stats tx;
  25484. +- struct wal_dbg_rx_stats rx;
  25485. +- struct wal_dbg_peer_stats peer;
  25486. +-} __packed;
  25487. +-
  25488. + enum wmi_stats_id {
  25489. +- WMI_REQUEST_PEER_STAT = 0x01,
  25490. +- WMI_REQUEST_AP_STAT = 0x02
  25491. ++ WMI_STAT_PEER = BIT(0),
  25492. ++ WMI_STAT_AP = BIT(1),
  25493. ++ WMI_STAT_PDEV = BIT(2),
  25494. ++ WMI_STAT_VDEV = BIT(3),
  25495. ++ WMI_STAT_BCNFLT = BIT(4),
  25496. ++ WMI_STAT_VDEV_RATE = BIT(5),
  25497. + };
  25498. +
  25499. + struct wlan_inst_rssi_args {
  25500. +@@ -2801,7 +3097,7 @@ struct wmi_pdev_suspend_cmd {
  25501. + } __packed;
  25502. +
  25503. + struct wmi_stats_event {
  25504. +- __le32 stats_id; /* %WMI_REQUEST_ */
  25505. ++ __le32 stats_id; /* WMI_STAT_ */
  25506. + /*
  25507. + * number of pdev stats event structures
  25508. + * (wmi_pdev_stats) 0 or 1
  25509. +@@ -2830,30 +3126,38 @@ struct wmi_stats_event {
  25510. + u8 data[0];
  25511. + } __packed;
  25512. +
  25513. ++struct wmi_10_2_stats_event {
  25514. ++ __le32 stats_id; /* %WMI_REQUEST_ */
  25515. ++ __le32 num_pdev_stats;
  25516. ++ __le32 num_pdev_ext_stats;
  25517. ++ __le32 num_vdev_stats;
  25518. ++ __le32 num_peer_stats;
  25519. ++ __le32 num_bcnflt_stats;
  25520. ++ u8 data[0];
  25521. ++} __packed;
  25522. ++
  25523. + /*
  25524. + * PDEV statistics
  25525. + * TODO: add all PDEV stats here
  25526. + */
  25527. +-struct wmi_pdev_stats_old {
  25528. +- __le32 chan_nf; /* Channel noise floor */
  25529. +- __le32 tx_frame_count; /* TX frame count */
  25530. +- __le32 rx_frame_count; /* RX frame count */
  25531. +- __le32 rx_clear_count; /* rx clear count */
  25532. +- __le32 cycle_count; /* cycle count */
  25533. +- __le32 phy_err_count; /* Phy error count */
  25534. +- __le32 chan_tx_pwr; /* channel tx power */
  25535. +- struct wal_dbg_stats wal; /* WAL dbg stats */
  25536. +-} __packed;
  25537. +-
  25538. +-struct wmi_pdev_stats_10x {
  25539. +- __le32 chan_nf; /* Channel noise floor */
  25540. +- __le32 tx_frame_count; /* TX frame count */
  25541. +- __le32 rx_frame_count; /* RX frame count */
  25542. +- __le32 rx_clear_count; /* rx clear count */
  25543. +- __le32 cycle_count; /* cycle count */
  25544. +- __le32 phy_err_count; /* Phy error count */
  25545. +- __le32 chan_tx_pwr; /* channel tx power */
  25546. +- struct wal_dbg_stats wal; /* WAL dbg stats */
  25547. ++struct wmi_pdev_stats_base {
  25548. ++ __le32 chan_nf;
  25549. ++ __le32 tx_frame_count;
  25550. ++ __le32 rx_frame_count;
  25551. ++ __le32 rx_clear_count;
  25552. ++ __le32 cycle_count;
  25553. ++ __le32 phy_err_count;
  25554. ++ __le32 chan_tx_pwr;
  25555. ++} __packed;
  25556. ++
  25557. ++struct wmi_pdev_stats {
  25558. ++ struct wmi_pdev_stats_base base;
  25559. ++ struct wmi_pdev_stats_tx tx;
  25560. ++ struct wmi_pdev_stats_rx rx;
  25561. ++ struct wmi_pdev_stats_peer peer;
  25562. ++} __packed;
  25563. ++
  25564. ++struct wmi_pdev_stats_extra {
  25565. + __le32 ack_rx_bad;
  25566. + __le32 rts_bad;
  25567. + __le32 rts_good;
  25568. +@@ -2862,6 +3166,30 @@ struct wmi_pdev_stats_10x {
  25569. + __le32 mib_int_count;
  25570. + } __packed;
  25571. +
  25572. ++struct wmi_10x_pdev_stats {
  25573. ++ struct wmi_pdev_stats_base base;
  25574. ++ struct wmi_pdev_stats_tx tx;
  25575. ++ struct wmi_pdev_stats_rx rx;
  25576. ++ struct wmi_pdev_stats_peer peer;
  25577. ++ struct wmi_pdev_stats_extra extra;
  25578. ++} __packed;
  25579. ++
  25580. ++struct wmi_pdev_stats_mem {
  25581. ++ __le32 dram_free;
  25582. ++ __le32 iram_free;
  25583. ++} __packed;
  25584. ++
  25585. ++struct wmi_10_2_pdev_stats {
  25586. ++ struct wmi_pdev_stats_base base;
  25587. ++ struct wmi_pdev_stats_tx tx;
  25588. ++ __le32 mc_drop;
  25589. ++ struct wmi_pdev_stats_rx rx;
  25590. ++ __le32 pdev_rx_timeout;
  25591. ++ struct wmi_pdev_stats_mem mem;
  25592. ++ struct wmi_pdev_stats_peer peer;
  25593. ++ struct wmi_pdev_stats_extra extra;
  25594. ++} __packed;
  25595. ++
  25596. + /*
  25597. + * VDEV statistics
  25598. + * TODO: add all VDEV stats here
  25599. +@@ -2874,19 +3202,43 @@ struct wmi_vdev_stats {
  25600. + * peer statistics.
  25601. + * TODO: add more stats
  25602. + */
  25603. +-struct wmi_peer_stats_old {
  25604. ++struct wmi_peer_stats {
  25605. + struct wmi_mac_addr peer_macaddr;
  25606. + __le32 peer_rssi;
  25607. + __le32 peer_tx_rate;
  25608. + } __packed;
  25609. +
  25610. +-struct wmi_peer_stats_10x {
  25611. +- struct wmi_mac_addr peer_macaddr;
  25612. +- __le32 peer_rssi;
  25613. +- __le32 peer_tx_rate;
  25614. ++struct wmi_10x_peer_stats {
  25615. ++ struct wmi_peer_stats old;
  25616. + __le32 peer_rx_rate;
  25617. + } __packed;
  25618. +
  25619. ++struct wmi_10_2_peer_stats {
  25620. ++ struct wmi_peer_stats old;
  25621. ++ __le32 peer_rx_rate;
  25622. ++ __le32 current_per;
  25623. ++ __le32 retries;
  25624. ++ __le32 tx_rate_count;
  25625. ++ __le32 max_4ms_frame_len;
  25626. ++ __le32 total_sub_frames;
  25627. ++ __le32 tx_bytes;
  25628. ++ __le32 num_pkt_loss_overflow[4];
  25629. ++ __le32 num_pkt_loss_excess_retry[4];
  25630. ++} __packed;
  25631. ++
  25632. ++struct wmi_10_2_4_peer_stats {
  25633. ++ struct wmi_10_2_peer_stats common;
  25634. ++ __le32 unknown_value; /* FIXME: what is this word? */
  25635. ++} __packed;
  25636. ++
  25637. ++struct wmi_10_2_pdev_ext_stats {
  25638. ++ __le32 rx_rssi_comb;
  25639. ++ __le32 rx_rssi[4];
  25640. ++ __le32 rx_mcs[10];
  25641. ++ __le32 tx_mcs[10];
  25642. ++ __le32 ack_rssi;
  25643. ++} __packed;
  25644. ++
  25645. + struct wmi_vdev_create_cmd {
  25646. + __le32 vdev_id;
  25647. + __le32 vdev_type;
  25648. +@@ -3387,8 +3739,21 @@ enum wmi_10x_vdev_param {
  25649. + WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
  25650. +
  25651. + WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
  25652. ++
  25653. ++ /* following are available as of firmware 10.2 */
  25654. ++ WMI_10X_VDEV_PARAM_TX_ENCAP_TYPE,
  25655. ++ WMI_10X_VDEV_PARAM_CABQ_MAXDUR,
  25656. ++ WMI_10X_VDEV_PARAM_MFPTEST_SET,
  25657. ++ WMI_10X_VDEV_PARAM_RTS_FIXED_RATE,
  25658. ++ WMI_10X_VDEV_PARAM_VHT_SGIMASK,
  25659. ++ WMI_10X_VDEV_PARAM_VHT80_RATEMASK,
  25660. + };
  25661. +
  25662. ++#define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
  25663. ++#define WMI_VDEV_PARAM_TXBF_MU_TX_BFEE BIT(1)
  25664. ++#define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2)
  25665. ++#define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
  25666. ++
  25667. + /* slot time long */
  25668. + #define WMI_VDEV_SLOT_TIME_LONG 0x1
  25669. + /* slot time short */
  25670. +@@ -3444,6 +3809,98 @@ struct wmi_vdev_simple_event {
  25671. + /* unsupported VDEV combination */
  25672. + #define WMI_INIFIED_VDEV_START_RESPONSE_NOT_SUPPORTED 0x2
  25673. +
  25674. ++/* TODO: please add more comments if you have in-depth information */
  25675. ++struct wmi_vdev_spectral_conf_cmd {
  25676. ++ __le32 vdev_id;
  25677. ++
  25678. ++ /* number of fft samples to send (0 for infinite) */
  25679. ++ __le32 scan_count;
  25680. ++ __le32 scan_period;
  25681. ++ __le32 scan_priority;
  25682. ++
  25683. ++ /* number of bins in the FFT: 2^(fft_size - bin_scale) */
  25684. ++ __le32 scan_fft_size;
  25685. ++ __le32 scan_gc_ena;
  25686. ++ __le32 scan_restart_ena;
  25687. ++ __le32 scan_noise_floor_ref;
  25688. ++ __le32 scan_init_delay;
  25689. ++ __le32 scan_nb_tone_thr;
  25690. ++ __le32 scan_str_bin_thr;
  25691. ++ __le32 scan_wb_rpt_mode;
  25692. ++ __le32 scan_rssi_rpt_mode;
  25693. ++ __le32 scan_rssi_thr;
  25694. ++ __le32 scan_pwr_format;
  25695. ++
  25696. ++ /* rpt_mode: Format of FFT report to software for spectral scan
  25697. ++ * triggered FFTs:
  25698. ++ * 0: No FFT report (only spectral scan summary report)
  25699. ++ * 1: 2-dword summary of metrics for each completed FFT + spectral
  25700. ++ * scan summary report
  25701. ++ * 2: 2-dword summary of metrics for each completed FFT +
  25702. ++ * 1x- oversampled bins(in-band) per FFT + spectral scan summary
  25703. ++ * report
  25704. ++ * 3: 2-dword summary of metrics for each completed FFT +
  25705. ++ * 2x- oversampled bins (all) per FFT + spectral scan summary
  25706. ++ */
  25707. ++ __le32 scan_rpt_mode;
  25708. ++ __le32 scan_bin_scale;
  25709. ++ __le32 scan_dbm_adj;
  25710. ++ __le32 scan_chn_mask;
  25711. ++} __packed;
  25712. ++
  25713. ++struct wmi_vdev_spectral_conf_arg {
  25714. ++ u32 vdev_id;
  25715. ++ u32 scan_count;
  25716. ++ u32 scan_period;
  25717. ++ u32 scan_priority;
  25718. ++ u32 scan_fft_size;
  25719. ++ u32 scan_gc_ena;
  25720. ++ u32 scan_restart_ena;
  25721. ++ u32 scan_noise_floor_ref;
  25722. ++ u32 scan_init_delay;
  25723. ++ u32 scan_nb_tone_thr;
  25724. ++ u32 scan_str_bin_thr;
  25725. ++ u32 scan_wb_rpt_mode;
  25726. ++ u32 scan_rssi_rpt_mode;
  25727. ++ u32 scan_rssi_thr;
  25728. ++ u32 scan_pwr_format;
  25729. ++ u32 scan_rpt_mode;
  25730. ++ u32 scan_bin_scale;
  25731. ++ u32 scan_dbm_adj;
  25732. ++ u32 scan_chn_mask;
  25733. ++};
  25734. ++
  25735. ++#define WMI_SPECTRAL_ENABLE_DEFAULT 0
  25736. ++#define WMI_SPECTRAL_COUNT_DEFAULT 0
  25737. ++#define WMI_SPECTRAL_PERIOD_DEFAULT 35
  25738. ++#define WMI_SPECTRAL_PRIORITY_DEFAULT 1
  25739. ++#define WMI_SPECTRAL_FFT_SIZE_DEFAULT 7
  25740. ++#define WMI_SPECTRAL_GC_ENA_DEFAULT 1
  25741. ++#define WMI_SPECTRAL_RESTART_ENA_DEFAULT 0
  25742. ++#define WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT -96
  25743. ++#define WMI_SPECTRAL_INIT_DELAY_DEFAULT 80
  25744. ++#define WMI_SPECTRAL_NB_TONE_THR_DEFAULT 12
  25745. ++#define WMI_SPECTRAL_STR_BIN_THR_DEFAULT 8
  25746. ++#define WMI_SPECTRAL_WB_RPT_MODE_DEFAULT 0
  25747. ++#define WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT 0
  25748. ++#define WMI_SPECTRAL_RSSI_THR_DEFAULT 0xf0
  25749. ++#define WMI_SPECTRAL_PWR_FORMAT_DEFAULT 0
  25750. ++#define WMI_SPECTRAL_RPT_MODE_DEFAULT 2
  25751. ++#define WMI_SPECTRAL_BIN_SCALE_DEFAULT 1
  25752. ++#define WMI_SPECTRAL_DBM_ADJ_DEFAULT 1
  25753. ++#define WMI_SPECTRAL_CHN_MASK_DEFAULT 1
  25754. ++
  25755. ++struct wmi_vdev_spectral_enable_cmd {
  25756. ++ __le32 vdev_id;
  25757. ++ __le32 trigger_cmd;
  25758. ++ __le32 enable_cmd;
  25759. ++} __packed;
  25760. ++
  25761. ++#define WMI_SPECTRAL_TRIGGER_CMD_TRIGGER 1
  25762. ++#define WMI_SPECTRAL_TRIGGER_CMD_CLEAR 2
  25763. ++#define WMI_SPECTRAL_ENABLE_CMD_ENABLE 1
  25764. ++#define WMI_SPECTRAL_ENABLE_CMD_DISABLE 2
  25765. ++
  25766. + /* Beacon processing related command and event structures */
  25767. + struct wmi_bcn_tx_hdr {
  25768. + __le32 vdev_id;
  25769. +@@ -3470,6 +3927,11 @@ enum wmi_bcn_tx_ref_flags {
  25770. + WMI_BCN_TX_REF_FLAG_DELIVER_CAB = 0x2,
  25771. + };
  25772. +
  25773. ++/* TODO: It is unclear why "no antenna" works while any other seemingly valid
  25774. ++ * chainmask yields no beacons on the air at all.
  25775. ++ */
  25776. ++#define WMI_BCN_TX_REF_DEF_ANTENNA 0
  25777. ++
  25778. + struct wmi_bcn_tx_ref_cmd {
  25779. + __le32 vdev_id;
  25780. + __le32 data_len;
  25781. +@@ -3481,6 +3943,8 @@ struct wmi_bcn_tx_ref_cmd {
  25782. + __le32 frame_control;
  25783. + /* to control CABQ traffic: WMI_BCN_TX_REF_FLAG_ */
  25784. + __le32 flags;
  25785. ++ /* introduced in 10.2 */
  25786. ++ __le32 antenna_mask;
  25787. + } __packed;
  25788. +
  25789. + /* Beacon filter */
  25790. +@@ -3633,6 +4097,13 @@ enum wmi_sta_ps_param_pspoll_count {
  25791. + * Values greater than 0 indicate the maximum numer of PS-Poll frames
  25792. + * FW will send before waking up.
  25793. + */
  25794. ++
  25795. ++ /* When u-APSD is enabled the firmware will be very reluctant to exit
  25796. ++ * STA PS. This could result in very poor Rx performance with STA doing
  25797. ++ * PS-Poll for each and every buffered frame. This value is a bit
  25798. ++ * arbitrary.
  25799. ++ */
  25800. ++ WMI_STA_PS_PSPOLL_COUNT_UAPSD = 3,
  25801. + };
  25802. +
  25803. + /*
  25804. +@@ -3658,6 +4129,30 @@ enum wmi_sta_ps_param_uapsd {
  25805. + WMI_STA_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7),
  25806. + };
  25807. +
  25808. ++#define WMI_STA_UAPSD_MAX_INTERVAL_MSEC UINT_MAX
  25809. ++
  25810. ++struct wmi_sta_uapsd_auto_trig_param {
  25811. ++ __le32 wmm_ac;
  25812. ++ __le32 user_priority;
  25813. ++ __le32 service_interval;
  25814. ++ __le32 suspend_interval;
  25815. ++ __le32 delay_interval;
  25816. ++};
  25817. ++
  25818. ++struct wmi_sta_uapsd_auto_trig_cmd_fixed_param {
  25819. ++ __le32 vdev_id;
  25820. ++ struct wmi_mac_addr peer_macaddr;
  25821. ++ __le32 num_ac;
  25822. ++};
  25823. ++
  25824. ++struct wmi_sta_uapsd_auto_trig_arg {
  25825. ++ u32 wmm_ac;
  25826. ++ u32 user_priority;
  25827. ++ u32 service_interval;
  25828. ++ u32 suspend_interval;
  25829. ++ u32 delay_interval;
  25830. ++};
  25831. ++
  25832. + enum wmi_sta_powersave_param {
  25833. + /*
  25834. + * Controls how frames are retrievd from AP while STA is sleeping
  25835. +@@ -3823,7 +4318,7 @@ struct wmi_bcn_info {
  25836. +
  25837. + struct wmi_host_swba_event {
  25838. + __le32 vdev_map;
  25839. +- struct wmi_bcn_info bcn_info[1];
  25840. ++ struct wmi_bcn_info bcn_info[0];
  25841. + } __packed;
  25842. +
  25843. + #define WMI_MAX_AP_VDEV 16
  25844. +@@ -3833,7 +4328,6 @@ struct wmi_tbtt_offset_event {
  25845. + __le32 tbttoffset_list[WMI_MAX_AP_VDEV];
  25846. + } __packed;
  25847. +
  25848. +-
  25849. + struct wmi_peer_create_cmd {
  25850. + __le32 vdev_id;
  25851. + struct wmi_mac_addr peer_macaddr;
  25852. +@@ -3951,7 +4445,8 @@ enum wmi_peer_param {
  25853. + WMI_PEER_AUTHORIZE = 0x3,
  25854. + WMI_PEER_CHAN_WIDTH = 0x4,
  25855. + WMI_PEER_NSS = 0x5,
  25856. +- WMI_PEER_USE_4ADDR = 0x6
  25857. ++ WMI_PEER_USE_4ADDR = 0x6,
  25858. ++ WMI_PEER_DUMMY_VAR = 0xff, /* dummy parameter for STA PS workaround */
  25859. + };
  25860. +
  25861. + struct wmi_peer_set_param_cmd {
  25862. +@@ -4029,7 +4524,7 @@ struct wmi_peer_set_q_empty_callback_cmd
  25863. + #define WMI_PEER_SPATIAL_MUX 0x00200000
  25864. + #define WMI_PEER_VHT 0x02000000
  25865. + #define WMI_PEER_80MHZ 0x04000000
  25866. +-#define WMI_PEER_PMF 0x08000000
  25867. ++#define WMI_PEER_VHT_2G 0x08000000
  25868. +
  25869. + /*
  25870. + * Peer rate capabilities.
  25871. +@@ -4053,7 +4548,7 @@ struct wmi_peer_set_q_empty_callback_cmd
  25872. + /* Maximum listen interval supported by hw in units of beacon interval */
  25873. + #define ATH10K_MAX_HW_LISTEN_INTERVAL 5
  25874. +
  25875. +-struct wmi_peer_assoc_complete_cmd {
  25876. ++struct wmi_common_peer_assoc_complete_cmd {
  25877. + struct wmi_mac_addr peer_macaddr;
  25878. + __le32 vdev_id;
  25879. + __le32 peer_new_assoc; /* 1=assoc, 0=reassoc */
  25880. +@@ -4071,11 +4566,30 @@ struct wmi_peer_assoc_complete_cmd {
  25881. + __le32 peer_vht_caps;
  25882. + __le32 peer_phymode;
  25883. + struct wmi_vht_rate_set peer_vht_rates;
  25884. ++};
  25885. ++
  25886. ++struct wmi_main_peer_assoc_complete_cmd {
  25887. ++ struct wmi_common_peer_assoc_complete_cmd cmd;
  25888. ++
  25889. + /* HT Operation Element of the peer. Five bytes packed in 2
  25890. + * INT32 array and filled from lsb to msb. */
  25891. + __le32 peer_ht_info[2];
  25892. + } __packed;
  25893. +
  25894. ++struct wmi_10_1_peer_assoc_complete_cmd {
  25895. ++ struct wmi_common_peer_assoc_complete_cmd cmd;
  25896. ++} __packed;
  25897. ++
  25898. ++#define WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX_LSB 0
  25899. ++#define WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX_MASK 0x0f
  25900. ++#define WMI_PEER_ASSOC_INFO0_MAX_NSS_LSB 4
  25901. ++#define WMI_PEER_ASSOC_INFO0_MAX_NSS_MASK 0xf0
  25902. ++
  25903. ++struct wmi_10_2_peer_assoc_complete_cmd {
  25904. ++ struct wmi_common_peer_assoc_complete_cmd cmd;
  25905. ++ __le32 info0; /* WMI_PEER_ASSOC_INFO0_ */
  25906. ++} __packed;
  25907. ++
  25908. + struct wmi_peer_assoc_complete_arg {
  25909. + u8 addr[ETH_ALEN];
  25910. + u32 vdev_id;
  25911. +@@ -4161,6 +4675,11 @@ enum wmi_sta_keepalive_method {
  25912. + WMI_STA_KEEPALIVE_METHOD_UNSOLICITATED_ARP_RESPONSE = 2,
  25913. + };
  25914. +
  25915. ++#define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0
  25916. ++
  25917. ++/* Firmware crashes if keepalive interval exceeds this limit */
  25918. ++#define WMI_STA_KEEPALIVE_INTERVAL_MAX_SECONDS 0xffff
  25919. ++
  25920. + /* note: ip4 addresses are in network byte order, i.e. big endian */
  25921. + struct wmi_sta_keepalive_arp_resp {
  25922. + __be32 src_ip4_addr;
  25923. +@@ -4176,6 +4695,16 @@ struct wmi_sta_keepalive_cmd {
  25924. + struct wmi_sta_keepalive_arp_resp arp_resp;
  25925. + } __packed;
  25926. +
  25927. ++struct wmi_sta_keepalive_arg {
  25928. ++ u32 vdev_id;
  25929. ++ u32 enabled;
  25930. ++ u32 method;
  25931. ++ u32 interval;
  25932. ++ __be32 src_ip4_addr;
  25933. ++ __be32 dest_ip4_addr;
  25934. ++ const u8 dest_mac_addr[ETH_ALEN];
  25935. ++};
  25936. ++
  25937. + enum wmi_force_fw_hang_type {
  25938. + WMI_FORCE_FW_HANG_ASSERT = 1,
  25939. + WMI_FORCE_FW_HANG_NO_DETECT,
  25940. +@@ -4240,7 +4769,6 @@ struct wmi_dbglog_cfg_cmd {
  25941. + __le32 config_valid;
  25942. + } __packed;
  25943. +
  25944. +-#define ATH10K_RTS_MAX 2347
  25945. + #define ATH10K_FRAGMT_THRESHOLD_MIN 540
  25946. + #define ATH10K_FRAGMT_THRESHOLD_MAX 2346
  25947. +
  25948. +@@ -4251,72 +4779,170 @@ struct wmi_dbglog_cfg_cmd {
  25949. + /* By default disable power save for IBSS */
  25950. + #define ATH10K_DEFAULT_ATIM 0
  25951. +
  25952. ++#define WMI_MAX_MEM_REQS 16
  25953. ++
  25954. ++struct wmi_scan_ev_arg {
  25955. ++ __le32 event_type; /* %WMI_SCAN_EVENT_ */
  25956. ++ __le32 reason; /* %WMI_SCAN_REASON_ */
  25957. ++ __le32 channel_freq; /* only valid for WMI_SCAN_EVENT_FOREIGN_CHANNEL */
  25958. ++ __le32 scan_req_id;
  25959. ++ __le32 scan_id;
  25960. ++ __le32 vdev_id;
  25961. ++};
  25962. ++
  25963. ++struct wmi_mgmt_rx_ev_arg {
  25964. ++ __le32 channel;
  25965. ++ __le32 snr;
  25966. ++ __le32 rate;
  25967. ++ __le32 phy_mode;
  25968. ++ __le32 buf_len;
  25969. ++ __le32 status; /* %WMI_RX_STATUS_ */
  25970. ++};
  25971. ++
  25972. ++struct wmi_ch_info_ev_arg {
  25973. ++ __le32 err_code;
  25974. ++ __le32 freq;
  25975. ++ __le32 cmd_flags;
  25976. ++ __le32 noise_floor;
  25977. ++ __le32 rx_clear_count;
  25978. ++ __le32 cycle_count;
  25979. ++};
  25980. ++
  25981. ++struct wmi_vdev_start_ev_arg {
  25982. ++ __le32 vdev_id;
  25983. ++ __le32 req_id;
  25984. ++ __le32 resp_type; /* %WMI_VDEV_RESP_ */
  25985. ++ __le32 status;
  25986. ++};
  25987. ++
  25988. ++struct wmi_peer_kick_ev_arg {
  25989. ++ const u8 *mac_addr;
  25990. ++};
  25991. ++
  25992. ++struct wmi_swba_ev_arg {
  25993. ++ __le32 vdev_map;
  25994. ++ const struct wmi_tim_info *tim_info[WMI_MAX_AP_VDEV];
  25995. ++ const struct wmi_p2p_noa_info *noa_info[WMI_MAX_AP_VDEV];
  25996. ++};
  25997. ++
  25998. ++struct wmi_phyerr_ev_arg {
  25999. ++ __le32 num_phyerrs;
  26000. ++ __le32 tsf_l32;
  26001. ++ __le32 tsf_u32;
  26002. ++ __le32 buf_len;
  26003. ++ const struct wmi_phyerr *phyerrs;
  26004. ++};
  26005. ++
  26006. ++struct wmi_svc_rdy_ev_arg {
  26007. ++ __le32 min_tx_power;
  26008. ++ __le32 max_tx_power;
  26009. ++ __le32 ht_cap;
  26010. ++ __le32 vht_cap;
  26011. ++ __le32 sw_ver0;
  26012. ++ __le32 sw_ver1;
  26013. ++ __le32 fw_build;
  26014. ++ __le32 phy_capab;
  26015. ++ __le32 num_rf_chains;
  26016. ++ __le32 eeprom_rd;
  26017. ++ __le32 num_mem_reqs;
  26018. ++ const __le32 *service_map;
  26019. ++ size_t service_map_len;
  26020. ++ const struct wlan_host_mem_req *mem_reqs[WMI_MAX_MEM_REQS];
  26021. ++};
  26022. ++
  26023. ++struct wmi_rdy_ev_arg {
  26024. ++ __le32 sw_version;
  26025. ++ __le32 abi_version;
  26026. ++ __le32 status;
  26027. ++ const u8 *mac_addr;
  26028. ++};
  26029. ++
  26030. ++struct wmi_pdev_temperature_event {
  26031. ++ /* temperature value in degrees Celsius */
  26032. ++ __le32 temperature;
  26033. ++} __packed;
  26034. ++
  26035. + struct ath10k;
  26036. + struct ath10k_vif;
  26037. ++struct ath10k_fw_stats_pdev;
  26038. ++struct ath10k_fw_stats_peer;
  26039. +
  26040. + int ath10k_wmi_attach(struct ath10k *ar);
  26041. + void ath10k_wmi_detach(struct ath10k *ar);
  26042. + int ath10k_wmi_wait_for_service_ready(struct ath10k *ar);
  26043. + int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
  26044. +
  26045. +-int ath10k_wmi_connect_htc_service(struct ath10k *ar);
  26046. +-int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
  26047. +- const struct wmi_channel_arg *);
  26048. +-int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt);
  26049. +-int ath10k_wmi_pdev_resume_target(struct ath10k *ar);
  26050. +-int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
  26051. +- u16 rd5g, u16 ctl2g, u16 ctl5g,
  26052. +- enum wmi_dfs_region dfs_reg);
  26053. +-int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value);
  26054. +-int ath10k_wmi_cmd_init(struct ath10k *ar);
  26055. +-int ath10k_wmi_start_scan(struct ath10k *ar, const struct wmi_start_scan_arg *);
  26056. ++struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len);
  26057. ++int ath10k_wmi_connect(struct ath10k *ar);
  26058. ++
  26059. ++struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len);
  26060. ++int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
  26061. ++int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
  26062. ++ u32 cmd_id);
  26063. + void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *);
  26064. +-int ath10k_wmi_stop_scan(struct ath10k *ar,
  26065. +- const struct wmi_stop_scan_arg *arg);
  26066. +-int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
  26067. +- enum wmi_vdev_type type,
  26068. +- enum wmi_vdev_subtype subtype,
  26069. +- const u8 macaddr[ETH_ALEN]);
  26070. +-int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id);
  26071. +-int ath10k_wmi_vdev_start(struct ath10k *ar,
  26072. +- const struct wmi_vdev_start_request_arg *);
  26073. +-int ath10k_wmi_vdev_restart(struct ath10k *ar,
  26074. +- const struct wmi_vdev_start_request_arg *);
  26075. +-int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id);
  26076. +-int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
  26077. +- const u8 *bssid);
  26078. +-int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id);
  26079. +-int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
  26080. +- u32 param_id, u32 param_value);
  26081. +-int ath10k_wmi_vdev_install_key(struct ath10k *ar,
  26082. +- const struct wmi_vdev_install_key_arg *arg);
  26083. +-int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
  26084. +- const u8 peer_addr[ETH_ALEN]);
  26085. +-int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
  26086. +- const u8 peer_addr[ETH_ALEN]);
  26087. +-int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
  26088. +- const u8 peer_addr[ETH_ALEN], u32 tid_bitmap);
  26089. +-int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
  26090. +- const u8 *peer_addr,
  26091. +- enum wmi_peer_param param_id, u32 param_value);
  26092. +-int ath10k_wmi_peer_assoc(struct ath10k *ar,
  26093. +- const struct wmi_peer_assoc_complete_arg *arg);
  26094. +-int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
  26095. +- enum wmi_sta_ps_mode psmode);
  26096. +-int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
  26097. +- enum wmi_sta_powersave_param param_id,
  26098. +- u32 value);
  26099. +-int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
  26100. +- enum wmi_ap_ps_peer_param param_id, u32 value);
  26101. +-int ath10k_wmi_scan_chan_list(struct ath10k *ar,
  26102. +- const struct wmi_scan_chan_list_arg *arg);
  26103. +-int ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif);
  26104. +-int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
  26105. +- const struct wmi_pdev_set_wmm_params_arg *arg);
  26106. +-int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id);
  26107. +-int ath10k_wmi_force_fw_hang(struct ath10k *ar,
  26108. +- enum wmi_force_fw_hang_type type, u32 delay_ms);
  26109. +-int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb);
  26110. +-int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable);
  26111. ++
  26112. ++void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
  26113. ++ struct ath10k_fw_stats_pdev *dst);
  26114. ++void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
  26115. ++ struct ath10k_fw_stats_pdev *dst);
  26116. ++void ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
  26117. ++ struct ath10k_fw_stats_pdev *dst);
  26118. ++void ath10k_wmi_pull_pdev_stats_extra(const struct wmi_pdev_stats_extra *src,
  26119. ++ struct ath10k_fw_stats_pdev *dst);
  26120. ++void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
  26121. ++ struct ath10k_fw_stats_peer *dst);
  26122. ++void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
  26123. ++ struct wmi_host_mem_chunks *chunks);
  26124. ++void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
  26125. ++ const struct wmi_start_scan_arg *arg);
  26126. ++void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params,
  26127. ++ const struct wmi_wmm_params_arg *arg);
  26128. ++void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
  26129. ++ const struct wmi_channel_arg *arg);
  26130. ++int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg);
  26131. ++
  26132. ++int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb);
  26133. ++int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb);
  26134. ++void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb);
  26135. ++void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb);
  26136. ++int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb);
  26137. ++void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb);
  26138. ++void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb);
  26139. ++void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb);
  26140. ++void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb);
  26141. ++void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb);
  26142. ++void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb);
  26143. ++void ath10k_wmi_event_dfs(struct ath10k *ar,
  26144. ++ const struct wmi_phyerr *phyerr, u64 tsf);
  26145. ++void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
  26146. ++ const struct wmi_phyerr *phyerr,
  26147. ++ u64 tsf);
  26148. ++void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb);
  26149. ++void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb);
  26150. ++void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb);
  26151. ++void ath10k_wmi_event_debug_print(struct ath10k *ar, struct sk_buff *skb);
  26152. ++void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb);
  26153. ++void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, struct sk_buff *skb);
  26154. ++void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
  26155. ++ struct sk_buff *skb);
  26156. ++void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
  26157. ++ struct sk_buff *skb);
  26158. ++void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb);
  26159. ++void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb);
  26160. ++void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb);
  26161. ++void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb);
  26162. ++void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb);
  26163. ++void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar,
  26164. ++ struct sk_buff *skb);
  26165. ++void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, struct sk_buff *skb);
  26166. ++void ath10k_wmi_event_delba_complete(struct ath10k *ar, struct sk_buff *skb);
  26167. ++void ath10k_wmi_event_addba_complete(struct ath10k *ar, struct sk_buff *skb);
  26168. ++void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
  26169. ++ struct sk_buff *skb);
  26170. ++void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, struct sk_buff *skb);
  26171. ++void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb);
  26172. ++void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb);
  26173. ++void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb);
  26174. ++int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb);
  26175. +
  26176. + #endif /* _WMI_H_ */
  26177. +--- /dev/null
  26178. ++++ b/drivers/net/wireless/ath/ath10k/spectral.c
  26179. +@@ -0,0 +1,552 @@
  26180. ++/*
  26181. ++ * Copyright (c) 2013 Qualcomm Atheros, Inc.
  26182. ++ *
  26183. ++ * Permission to use, copy, modify, and/or distribute this software for any
  26184. ++ * purpose with or without fee is hereby granted, provided that the above
  26185. ++ * copyright notice and this permission notice appear in all copies.
  26186. ++ *
  26187. ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  26188. ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  26189. ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  26190. ++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  26191. ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  26192. ++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  26193. ++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  26194. ++ */
  26195. ++
  26196. ++#include <linux/relay.h>
  26197. ++#include "core.h"
  26198. ++#include "debug.h"
  26199. ++#include "wmi-ops.h"
  26200. ++
  26201. ++static void send_fft_sample(struct ath10k *ar,
  26202. ++ const struct fft_sample_tlv *fft_sample_tlv)
  26203. ++{
  26204. ++ int length;
  26205. ++
  26206. ++ if (!ar->spectral.rfs_chan_spec_scan)
  26207. ++ return;
  26208. ++
  26209. ++ length = __be16_to_cpu(fft_sample_tlv->length) +
  26210. ++ sizeof(*fft_sample_tlv);
  26211. ++ relay_write(ar->spectral.rfs_chan_spec_scan, fft_sample_tlv, length);
  26212. ++}
  26213. ++
  26214. ++static uint8_t get_max_exp(s8 max_index, u16 max_magnitude, size_t bin_len,
  26215. ++ u8 *data)
  26216. ++{
  26217. ++ int dc_pos;
  26218. ++ u8 max_exp;
  26219. ++
  26220. ++ dc_pos = bin_len / 2;
  26221. ++
  26222. ++ /* peak index outside of bins */
  26223. ++ if (dc_pos < max_index || -dc_pos >= max_index)
  26224. ++ return 0;
  26225. ++
  26226. ++ for (max_exp = 0; max_exp < 8; max_exp++) {
  26227. ++ if (data[dc_pos + max_index] == (max_magnitude >> max_exp))
  26228. ++ break;
  26229. ++ }
  26230. ++
  26231. ++ /* max_exp not found */
  26232. ++ if (data[dc_pos + max_index] != (max_magnitude >> max_exp))
  26233. ++ return 0;
  26234. ++
  26235. ++ return max_exp;
  26236. ++}
  26237. ++
  26238. ++int ath10k_spectral_process_fft(struct ath10k *ar,
  26239. ++ const struct wmi_phyerr *phyerr,
  26240. ++ const struct phyerr_fft_report *fftr,
  26241. ++ size_t bin_len, u64 tsf)
  26242. ++{
  26243. ++ struct fft_sample_ath10k *fft_sample;
  26244. ++ u8 buf[sizeof(*fft_sample) + SPECTRAL_ATH10K_MAX_NUM_BINS];
  26245. ++ u16 freq1, freq2, total_gain_db, base_pwr_db, length, peak_mag;
  26246. ++ u32 reg0, reg1;
  26247. ++ u8 chain_idx, *bins;
  26248. ++ int dc_pos;
  26249. ++
  26250. ++ fft_sample = (struct fft_sample_ath10k *)&buf;
  26251. ++
  26252. ++ if (bin_len < 64 || bin_len > SPECTRAL_ATH10K_MAX_NUM_BINS)
  26253. ++ return -EINVAL;
  26254. ++
  26255. ++ reg0 = __le32_to_cpu(fftr->reg0);
  26256. ++ reg1 = __le32_to_cpu(fftr->reg1);
  26257. ++
  26258. ++ length = sizeof(*fft_sample) - sizeof(struct fft_sample_tlv) + bin_len;
  26259. ++ fft_sample->tlv.type = ATH_FFT_SAMPLE_ATH10K;
  26260. ++ fft_sample->tlv.length = __cpu_to_be16(length);
  26261. ++
  26262. ++ /* TODO: there might be a reason why the hardware reports 20/40/80 MHz,
  26263. ++ * but the results/plots suggest that it's actually 22/44/88 MHz.
  26264. ++ */
  26265. ++ switch (phyerr->chan_width_mhz) {
  26266. ++ case 20:
  26267. ++ fft_sample->chan_width_mhz = 22;
  26268. ++ break;
  26269. ++ case 40:
  26270. ++ fft_sample->chan_width_mhz = 44;
  26271. ++ break;
  26272. ++ case 80:
  26273. ++ /* TODO: As experiments with an analogue sender and various
  26274. ++ * configurations (fft-sizes of 64/128/256 and 20/40/80 MHz)
  26275. ++ * show, the particular configuration of 80 MHz/64 bins does
  26276. ++ * not match with the other samples at all. Until the reason
  26277. ++ * for that is found, don't report these samples.
  26278. ++ */
  26279. ++ if (bin_len == 64)
  26280. ++ return -EINVAL;
  26281. ++ fft_sample->chan_width_mhz = 88;
  26282. ++ break;
  26283. ++ default:
  26284. ++ fft_sample->chan_width_mhz = phyerr->chan_width_mhz;
  26285. ++ }
  26286. ++
  26287. ++ fft_sample->relpwr_db = MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB);
  26288. ++ fft_sample->avgpwr_db = MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB);
  26289. ++
  26290. ++ peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG);
  26291. ++ fft_sample->max_magnitude = __cpu_to_be16(peak_mag);
  26292. ++ fft_sample->max_index = MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX);
  26293. ++ fft_sample->rssi = phyerr->rssi_combined;
  26294. ++
  26295. ++ total_gain_db = MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB);
  26296. ++ base_pwr_db = MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB);
  26297. ++ fft_sample->total_gain_db = __cpu_to_be16(total_gain_db);
  26298. ++ fft_sample->base_pwr_db = __cpu_to_be16(base_pwr_db);
  26299. ++
  26300. ++ freq1 = __le16_to_cpu(phyerr->freq1);
  26301. ++ freq2 = __le16_to_cpu(phyerr->freq2);
  26302. ++ fft_sample->freq1 = __cpu_to_be16(freq1);
  26303. ++ fft_sample->freq2 = __cpu_to_be16(freq2);
  26304. ++
  26305. ++ chain_idx = MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX);
  26306. ++
  26307. ++ fft_sample->noise = __cpu_to_be16(
  26308. ++ __le16_to_cpu(phyerr->nf_chains[chain_idx]));
  26309. ++
  26310. ++ bins = (u8 *)fftr;
  26311. ++ bins += sizeof(*fftr);
  26312. ++
  26313. ++ fft_sample->tsf = __cpu_to_be64(tsf);
  26314. ++
  26315. ++ /* max_exp has been directly reported by previous hardware (ath9k),
  26316. ++ * maybe its possible to get it by other means?
  26317. ++ */
  26318. ++ fft_sample->max_exp = get_max_exp(fft_sample->max_index, peak_mag,
  26319. ++ bin_len, bins);
  26320. ++
  26321. ++ memcpy(fft_sample->data, bins, bin_len);
  26322. ++
  26323. ++ /* DC value (value in the middle) is the blind spot of the spectral
  26324. ++ * sample and invalid, interpolate it.
  26325. ++ */
  26326. ++ dc_pos = bin_len / 2;
  26327. ++ fft_sample->data[dc_pos] = (fft_sample->data[dc_pos + 1] +
  26328. ++ fft_sample->data[dc_pos - 1]) / 2;
  26329. ++
  26330. ++ send_fft_sample(ar, &fft_sample->tlv);
  26331. ++
  26332. ++ return 0;
  26333. ++}
  26334. ++
  26335. ++static struct ath10k_vif *ath10k_get_spectral_vdev(struct ath10k *ar)
  26336. ++{
  26337. ++ struct ath10k_vif *arvif;
  26338. ++
  26339. ++ lockdep_assert_held(&ar->conf_mutex);
  26340. ++
  26341. ++ if (list_empty(&ar->arvifs))
  26342. ++ return NULL;
  26343. ++
  26344. ++ /* if there already is a vif doing spectral, return that. */
  26345. ++ list_for_each_entry(arvif, &ar->arvifs, list)
  26346. ++ if (arvif->spectral_enabled)
  26347. ++ return arvif;
  26348. ++
  26349. ++ /* otherwise, return the first vif. */
  26350. ++ return list_first_entry(&ar->arvifs, typeof(*arvif), list);
  26351. ++}
  26352. ++
  26353. ++static int ath10k_spectral_scan_trigger(struct ath10k *ar)
  26354. ++{
  26355. ++ struct ath10k_vif *arvif;
  26356. ++ int res;
  26357. ++ int vdev_id;
  26358. ++
  26359. ++ lockdep_assert_held(&ar->conf_mutex);
  26360. ++
  26361. ++ arvif = ath10k_get_spectral_vdev(ar);
  26362. ++ if (!arvif)
  26363. ++ return -ENODEV;
  26364. ++ vdev_id = arvif->vdev_id;
  26365. ++
  26366. ++ if (ar->spectral.mode == SPECTRAL_DISABLED)
  26367. ++ return 0;
  26368. ++
  26369. ++ res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id,
  26370. ++ WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
  26371. ++ WMI_SPECTRAL_ENABLE_CMD_ENABLE);
  26372. ++ if (res < 0)
  26373. ++ return res;
  26374. ++
  26375. ++ res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id,
  26376. ++ WMI_SPECTRAL_TRIGGER_CMD_TRIGGER,
  26377. ++ WMI_SPECTRAL_ENABLE_CMD_ENABLE);
  26378. ++ if (res < 0)
  26379. ++ return res;
  26380. ++
  26381. ++ return 0;
  26382. ++}
  26383. ++
  26384. ++static int ath10k_spectral_scan_config(struct ath10k *ar,
  26385. ++ enum ath10k_spectral_mode mode)
  26386. ++{
  26387. ++ struct wmi_vdev_spectral_conf_arg arg;
  26388. ++ struct ath10k_vif *arvif;
  26389. ++ int vdev_id, count, res = 0;
  26390. ++
  26391. ++ lockdep_assert_held(&ar->conf_mutex);
  26392. ++
  26393. ++ arvif = ath10k_get_spectral_vdev(ar);
  26394. ++ if (!arvif)
  26395. ++ return -ENODEV;
  26396. ++
  26397. ++ vdev_id = arvif->vdev_id;
  26398. ++
  26399. ++ arvif->spectral_enabled = (mode != SPECTRAL_DISABLED);
  26400. ++ ar->spectral.mode = mode;
  26401. ++
  26402. ++ res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id,
  26403. ++ WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
  26404. ++ WMI_SPECTRAL_ENABLE_CMD_DISABLE);
  26405. ++ if (res < 0) {
  26406. ++ ath10k_warn(ar, "failed to enable spectral scan: %d\n", res);
  26407. ++ return res;
  26408. ++ }
  26409. ++
  26410. ++ if (mode == SPECTRAL_DISABLED)
  26411. ++ return 0;
  26412. ++
  26413. ++ if (mode == SPECTRAL_BACKGROUND)
  26414. ++ count = WMI_SPECTRAL_COUNT_DEFAULT;
  26415. ++ else
  26416. ++ count = max_t(u8, 1, ar->spectral.config.count);
  26417. ++
  26418. ++ arg.vdev_id = vdev_id;
  26419. ++ arg.scan_count = count;
  26420. ++ arg.scan_period = WMI_SPECTRAL_PERIOD_DEFAULT;
  26421. ++ arg.scan_priority = WMI_SPECTRAL_PRIORITY_DEFAULT;
  26422. ++ arg.scan_fft_size = ar->spectral.config.fft_size;
  26423. ++ arg.scan_gc_ena = WMI_SPECTRAL_GC_ENA_DEFAULT;
  26424. ++ arg.scan_restart_ena = WMI_SPECTRAL_RESTART_ENA_DEFAULT;
  26425. ++ arg.scan_noise_floor_ref = WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT;
  26426. ++ arg.scan_init_delay = WMI_SPECTRAL_INIT_DELAY_DEFAULT;
  26427. ++ arg.scan_nb_tone_thr = WMI_SPECTRAL_NB_TONE_THR_DEFAULT;
  26428. ++ arg.scan_str_bin_thr = WMI_SPECTRAL_STR_BIN_THR_DEFAULT;
  26429. ++ arg.scan_wb_rpt_mode = WMI_SPECTRAL_WB_RPT_MODE_DEFAULT;
  26430. ++ arg.scan_rssi_rpt_mode = WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT;
  26431. ++ arg.scan_rssi_thr = WMI_SPECTRAL_RSSI_THR_DEFAULT;
  26432. ++ arg.scan_pwr_format = WMI_SPECTRAL_PWR_FORMAT_DEFAULT;
  26433. ++ arg.scan_rpt_mode = WMI_SPECTRAL_RPT_MODE_DEFAULT;
  26434. ++ arg.scan_bin_scale = WMI_SPECTRAL_BIN_SCALE_DEFAULT;
  26435. ++ arg.scan_dbm_adj = WMI_SPECTRAL_DBM_ADJ_DEFAULT;
  26436. ++ arg.scan_chn_mask = WMI_SPECTRAL_CHN_MASK_DEFAULT;
  26437. ++
  26438. ++ res = ath10k_wmi_vdev_spectral_conf(ar, &arg);
  26439. ++ if (res < 0) {
  26440. ++ ath10k_warn(ar, "failed to configure spectral scan: %d\n", res);
  26441. ++ return res;
  26442. ++ }
  26443. ++
  26444. ++ return 0;
  26445. ++}
  26446. ++
  26447. ++static ssize_t read_file_spec_scan_ctl(struct file *file, char __user *user_buf,
  26448. ++ size_t count, loff_t *ppos)
  26449. ++{
  26450. ++ struct ath10k *ar = file->private_data;
  26451. ++ char *mode = "";
  26452. ++ unsigned int len;
  26453. ++ enum ath10k_spectral_mode spectral_mode;
  26454. ++
  26455. ++ mutex_lock(&ar->conf_mutex);
  26456. ++ spectral_mode = ar->spectral.mode;
  26457. ++ mutex_unlock(&ar->conf_mutex);
  26458. ++
  26459. ++ switch (spectral_mode) {
  26460. ++ case SPECTRAL_DISABLED:
  26461. ++ mode = "disable";
  26462. ++ break;
  26463. ++ case SPECTRAL_BACKGROUND:
  26464. ++ mode = "background";
  26465. ++ break;
  26466. ++ case SPECTRAL_MANUAL:
  26467. ++ mode = "manual";
  26468. ++ break;
  26469. ++ }
  26470. ++
  26471. ++ len = strlen(mode);
  26472. ++ return simple_read_from_buffer(user_buf, count, ppos, mode, len);
  26473. ++}
  26474. ++
  26475. ++static ssize_t write_file_spec_scan_ctl(struct file *file,
  26476. ++ const char __user *user_buf,
  26477. ++ size_t count, loff_t *ppos)
  26478. ++{
  26479. ++ struct ath10k *ar = file->private_data;
  26480. ++ char buf[32];
  26481. ++ ssize_t len;
  26482. ++ int res;
  26483. ++
  26484. ++ len = min(count, sizeof(buf) - 1);
  26485. ++ if (copy_from_user(buf, user_buf, len))
  26486. ++ return -EFAULT;
  26487. ++
  26488. ++ buf[len] = '\0';
  26489. ++
  26490. ++ mutex_lock(&ar->conf_mutex);
  26491. ++
  26492. ++ if (strncmp("trigger", buf, 7) == 0) {
  26493. ++ if (ar->spectral.mode == SPECTRAL_MANUAL ||
  26494. ++ ar->spectral.mode == SPECTRAL_BACKGROUND) {
  26495. ++ /* reset the configuration to adopt possibly changed
  26496. ++ * debugfs parameters
  26497. ++ */
  26498. ++ res = ath10k_spectral_scan_config(ar,
  26499. ++ ar->spectral.mode);
  26500. ++ if (res < 0) {
  26501. ++ ath10k_warn(ar, "failed to reconfigure spectral scan: %d\n",
  26502. ++ res);
  26503. ++ }
  26504. ++ res = ath10k_spectral_scan_trigger(ar);
  26505. ++ if (res < 0) {
  26506. ++ ath10k_warn(ar, "failed to trigger spectral scan: %d\n",
  26507. ++ res);
  26508. ++ }
  26509. ++ } else {
  26510. ++ res = -EINVAL;
  26511. ++ }
  26512. ++ } else if (strncmp("background", buf, 9) == 0) {
  26513. ++ res = ath10k_spectral_scan_config(ar, SPECTRAL_BACKGROUND);
  26514. ++ } else if (strncmp("manual", buf, 6) == 0) {
  26515. ++ res = ath10k_spectral_scan_config(ar, SPECTRAL_MANUAL);
  26516. ++ } else if (strncmp("disable", buf, 7) == 0) {
  26517. ++ res = ath10k_spectral_scan_config(ar, SPECTRAL_DISABLED);
  26518. ++ } else {
  26519. ++ res = -EINVAL;
  26520. ++ }
  26521. ++
  26522. ++ mutex_unlock(&ar->conf_mutex);
  26523. ++
  26524. ++ if (res < 0)
  26525. ++ return res;
  26526. ++
  26527. ++ return count;
  26528. ++}
  26529. ++
  26530. ++static const struct file_operations fops_spec_scan_ctl = {
  26531. ++ .read = read_file_spec_scan_ctl,
  26532. ++ .write = write_file_spec_scan_ctl,
  26533. ++ .open = simple_open,
  26534. ++ .owner = THIS_MODULE,
  26535. ++ .llseek = default_llseek,
  26536. ++};
  26537. ++
  26538. ++static ssize_t read_file_spectral_count(struct file *file,
  26539. ++ char __user *user_buf,
  26540. ++ size_t count, loff_t *ppos)
  26541. ++{
  26542. ++ struct ath10k *ar = file->private_data;
  26543. ++ char buf[32];
  26544. ++ unsigned int len;
  26545. ++ u8 spectral_count;
  26546. ++
  26547. ++ mutex_lock(&ar->conf_mutex);
  26548. ++ spectral_count = ar->spectral.config.count;
  26549. ++ mutex_unlock(&ar->conf_mutex);
  26550. ++
  26551. ++ len = sprintf(buf, "%d\n", spectral_count);
  26552. ++ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
  26553. ++}
  26554. ++
  26555. ++static ssize_t write_file_spectral_count(struct file *file,
  26556. ++ const char __user *user_buf,
  26557. ++ size_t count, loff_t *ppos)
  26558. ++{
  26559. ++ struct ath10k *ar = file->private_data;
  26560. ++ unsigned long val;
  26561. ++ char buf[32];
  26562. ++ ssize_t len;
  26563. ++
  26564. ++ len = min(count, sizeof(buf) - 1);
  26565. ++ if (copy_from_user(buf, user_buf, len))
  26566. ++ return -EFAULT;
  26567. ++
  26568. ++ buf[len] = '\0';
  26569. ++ if (kstrtoul(buf, 0, &val))
  26570. ++ return -EINVAL;
  26571. ++
  26572. ++ if (val < 0 || val > 255)
  26573. ++ return -EINVAL;
  26574. ++
  26575. ++ mutex_lock(&ar->conf_mutex);
  26576. ++ ar->spectral.config.count = val;
  26577. ++ mutex_unlock(&ar->conf_mutex);
  26578. ++
  26579. ++ return count;
  26580. ++}
  26581. ++
  26582. ++static const struct file_operations fops_spectral_count = {
  26583. ++ .read = read_file_spectral_count,
  26584. ++ .write = write_file_spectral_count,
  26585. ++ .open = simple_open,
  26586. ++ .owner = THIS_MODULE,
  26587. ++ .llseek = default_llseek,
  26588. ++};
  26589. ++
  26590. ++static ssize_t read_file_spectral_bins(struct file *file,
  26591. ++ char __user *user_buf,
  26592. ++ size_t count, loff_t *ppos)
  26593. ++{
  26594. ++ struct ath10k *ar = file->private_data;
  26595. ++ char buf[32];
  26596. ++ unsigned int len, bins, fft_size, bin_scale;
  26597. ++
  26598. ++ mutex_lock(&ar->conf_mutex);
  26599. ++
  26600. ++ fft_size = ar->spectral.config.fft_size;
  26601. ++ bin_scale = WMI_SPECTRAL_BIN_SCALE_DEFAULT;
  26602. ++ bins = 1 << (fft_size - bin_scale);
  26603. ++
  26604. ++ mutex_unlock(&ar->conf_mutex);
  26605. ++
  26606. ++ len = sprintf(buf, "%d\n", bins);
  26607. ++ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
  26608. ++}
  26609. ++
  26610. ++static ssize_t write_file_spectral_bins(struct file *file,
  26611. ++ const char __user *user_buf,
  26612. ++ size_t count, loff_t *ppos)
  26613. ++{
  26614. ++ struct ath10k *ar = file->private_data;
  26615. ++ unsigned long val;
  26616. ++ char buf[32];
  26617. ++ ssize_t len;
  26618. ++
  26619. ++ len = min(count, sizeof(buf) - 1);
  26620. ++ if (copy_from_user(buf, user_buf, len))
  26621. ++ return -EFAULT;
  26622. ++
  26623. ++ buf[len] = '\0';
  26624. ++ if (kstrtoul(buf, 0, &val))
  26625. ++ return -EINVAL;
  26626. ++
  26627. ++ if (val < 64 || val > SPECTRAL_ATH10K_MAX_NUM_BINS)
  26628. ++ return -EINVAL;
  26629. ++
  26630. ++ if (!is_power_of_2(val))
  26631. ++ return -EINVAL;
  26632. ++
  26633. ++ mutex_lock(&ar->conf_mutex);
  26634. ++ ar->spectral.config.fft_size = ilog2(val);
  26635. ++ ar->spectral.config.fft_size += WMI_SPECTRAL_BIN_SCALE_DEFAULT;
  26636. ++ mutex_unlock(&ar->conf_mutex);
  26637. ++
  26638. ++ return count;
  26639. ++}
  26640. ++
  26641. ++static const struct file_operations fops_spectral_bins = {
  26642. ++ .read = read_file_spectral_bins,
  26643. ++ .write = write_file_spectral_bins,
  26644. ++ .open = simple_open,
  26645. ++ .owner = THIS_MODULE,
  26646. ++ .llseek = default_llseek,
  26647. ++};
  26648. ++
  26649. ++static struct dentry *create_buf_file_handler(const char *filename,
  26650. ++ struct dentry *parent,
  26651. ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
  26652. ++ umode_t mode,
  26653. ++#else
  26654. ++ int mode,
  26655. ++#endif
  26656. ++ struct rchan_buf *buf,
  26657. ++ int *is_global)
  26658. ++{
  26659. ++ struct dentry *buf_file;
  26660. ++
  26661. ++ buf_file = debugfs_create_file(filename, mode, parent, buf,
  26662. ++ &relay_file_operations);
  26663. ++ *is_global = 1;
  26664. ++ return buf_file;
  26665. ++}
  26666. ++
  26667. ++static int remove_buf_file_handler(struct dentry *dentry)
  26668. ++{
  26669. ++ debugfs_remove(dentry);
  26670. ++
  26671. ++ return 0;
  26672. ++}
  26673. ++
  26674. ++static struct rchan_callbacks rfs_spec_scan_cb = {
  26675. ++ .create_buf_file = create_buf_file_handler,
  26676. ++ .remove_buf_file = remove_buf_file_handler,
  26677. ++};
  26678. ++
  26679. ++int ath10k_spectral_start(struct ath10k *ar)
  26680. ++{
  26681. ++ struct ath10k_vif *arvif;
  26682. ++
  26683. ++ lockdep_assert_held(&ar->conf_mutex);
  26684. ++
  26685. ++ list_for_each_entry(arvif, &ar->arvifs, list)
  26686. ++ arvif->spectral_enabled = 0;
  26687. ++
  26688. ++ ar->spectral.mode = SPECTRAL_DISABLED;
  26689. ++ ar->spectral.config.count = WMI_SPECTRAL_COUNT_DEFAULT;
  26690. ++ ar->spectral.config.fft_size = WMI_SPECTRAL_FFT_SIZE_DEFAULT;
  26691. ++
  26692. ++ return 0;
  26693. ++}
  26694. ++
  26695. ++int ath10k_spectral_vif_stop(struct ath10k_vif *arvif)
  26696. ++{
  26697. ++ if (!arvif->spectral_enabled)
  26698. ++ return 0;
  26699. ++
  26700. ++ return ath10k_spectral_scan_config(arvif->ar, SPECTRAL_DISABLED);
  26701. ++}
  26702. ++
  26703. ++int ath10k_spectral_create(struct ath10k *ar)
  26704. ++{
  26705. ++ ar->spectral.rfs_chan_spec_scan = relay_open("spectral_scan",
  26706. ++ ar->debug.debugfs_phy,
  26707. ++ 1024, 256,
  26708. ++ &rfs_spec_scan_cb, NULL);
  26709. ++ debugfs_create_file("spectral_scan_ctl",
  26710. ++ S_IRUSR | S_IWUSR,
  26711. ++ ar->debug.debugfs_phy, ar,
  26712. ++ &fops_spec_scan_ctl);
  26713. ++ debugfs_create_file("spectral_count",
  26714. ++ S_IRUSR | S_IWUSR,
  26715. ++ ar->debug.debugfs_phy, ar,
  26716. ++ &fops_spectral_count);
  26717. ++ debugfs_create_file("spectral_bins",
  26718. ++ S_IRUSR | S_IWUSR,
  26719. ++ ar->debug.debugfs_phy, ar,
  26720. ++ &fops_spectral_bins);
  26721. ++
  26722. ++ return 0;
  26723. ++}
  26724. ++
  26725. ++void ath10k_spectral_destroy(struct ath10k *ar)
  26726. ++{
  26727. ++ if (ar->spectral.rfs_chan_spec_scan) {
  26728. ++ relay_close(ar->spectral.rfs_chan_spec_scan);
  26729. ++ ar->spectral.rfs_chan_spec_scan = NULL;
  26730. ++ }
  26731. ++}
  26732. +--- /dev/null
  26733. ++++ b/drivers/net/wireless/ath/ath10k/spectral.h
  26734. +@@ -0,0 +1,90 @@
  26735. ++/*
  26736. ++ * Copyright (c) 2013 Qualcomm Atheros, Inc.
  26737. ++ *
  26738. ++ * Permission to use, copy, modify, and/or distribute this software for any
  26739. ++ * purpose with or without fee is hereby granted, provided that the above
  26740. ++ * copyright notice and this permission notice appear in all copies.
  26741. ++ *
  26742. ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  26743. ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  26744. ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  26745. ++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  26746. ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  26747. ++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  26748. ++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  26749. ++ */
  26750. ++
  26751. ++#ifndef SPECTRAL_H
  26752. ++#define SPECTRAL_H
  26753. ++
  26754. ++#include "../spectral_common.h"
  26755. ++
  26756. ++/**
  26757. ++ * struct ath10k_spec_scan - parameters for Atheros spectral scan
  26758. ++ *
  26759. ++ * @count: number of scan results requested for manual mode
  26760. ++ * @fft_size: number of bins to be requested = 2^(fft_size - bin_scale)
  26761. ++ */
  26762. ++struct ath10k_spec_scan {
  26763. ++ u8 count;
  26764. ++ u8 fft_size;
  26765. ++};
  26766. ++
  26767. ++/* enum ath10k_spectral_mode:
  26768. ++ *
  26769. ++ * @SPECTRAL_DISABLED: spectral mode is disabled
  26770. ++ * @SPECTRAL_BACKGROUND: hardware sends samples when it is not busy with
  26771. ++ * something else.
  26772. ++ * @SPECTRAL_MANUAL: spectral scan is enabled, triggering for samples
  26773. ++ * is performed manually.
  26774. ++ */
  26775. ++enum ath10k_spectral_mode {
  26776. ++ SPECTRAL_DISABLED = 0,
  26777. ++ SPECTRAL_BACKGROUND,
  26778. ++ SPECTRAL_MANUAL,
  26779. ++};
  26780. ++
  26781. ++#ifdef CPTCFG_ATH10K_DEBUGFS
  26782. ++
  26783. ++int ath10k_spectral_process_fft(struct ath10k *ar,
  26784. ++ const struct wmi_phyerr *phyerr,
  26785. ++ const struct phyerr_fft_report *fftr,
  26786. ++ size_t bin_len, u64 tsf);
  26787. ++int ath10k_spectral_start(struct ath10k *ar);
  26788. ++int ath10k_spectral_vif_stop(struct ath10k_vif *arvif);
  26789. ++int ath10k_spectral_create(struct ath10k *ar);
  26790. ++void ath10k_spectral_destroy(struct ath10k *ar);
  26791. ++
  26792. ++#else
  26793. ++
  26794. ++static inline int
  26795. ++ath10k_spectral_process_fft(struct ath10k *ar,
  26796. ++ const struct wmi_phyerr *phyerr,
  26797. ++ const struct phyerr_fft_report *fftr,
  26798. ++ size_t bin_len, u64 tsf)
  26799. ++{
  26800. ++ return 0;
  26801. ++}
  26802. ++
  26803. ++static inline int ath10k_spectral_start(struct ath10k *ar)
  26804. ++{
  26805. ++ return 0;
  26806. ++}
  26807. ++
  26808. ++static inline int ath10k_spectral_vif_stop(struct ath10k_vif *arvif)
  26809. ++{
  26810. ++ return 0;
  26811. ++}
  26812. ++
  26813. ++static inline int ath10k_spectral_create(struct ath10k *ar)
  26814. ++{
  26815. ++ return 0;
  26816. ++}
  26817. ++
  26818. ++static inline void ath10k_spectral_destroy(struct ath10k *ar)
  26819. ++{
  26820. ++}
  26821. ++
  26822. ++#endif /* CPTCFG_ATH10K_DEBUGFS */
  26823. ++
  26824. ++#endif /* SPECTRAL_H */
  26825. +--- /dev/null
  26826. ++++ b/drivers/net/wireless/ath/ath10k/testmode.c
  26827. +@@ -0,0 +1,385 @@
  26828. ++/*
  26829. ++ * Copyright (c) 2014 Qualcomm Atheros, Inc.
  26830. ++ *
  26831. ++ * Permission to use, copy, modify, and/or distribute this software for any
  26832. ++ * purpose with or without fee is hereby granted, provided that the above
  26833. ++ * copyright notice and this permission notice appear in all copies.
  26834. ++ *
  26835. ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  26836. ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  26837. ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  26838. ++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  26839. ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  26840. ++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  26841. ++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  26842. ++ */
  26843. ++
  26844. ++#include "testmode.h"
  26845. ++
  26846. ++#include <net/netlink.h>
  26847. ++#include <linux/firmware.h>
  26848. ++
  26849. ++#include "debug.h"
  26850. ++#include "wmi.h"
  26851. ++#include "hif.h"
  26852. ++#include "hw.h"
  26853. ++
  26854. ++#include "testmode_i.h"
  26855. ++
  26856. ++static const struct nla_policy ath10k_tm_policy[ATH10K_TM_ATTR_MAX + 1] = {
  26857. ++ [ATH10K_TM_ATTR_CMD] = { .type = NLA_U32 },
  26858. ++ [ATH10K_TM_ATTR_DATA] = { .type = NLA_BINARY,
  26859. ++ .len = ATH10K_TM_DATA_MAX_LEN },
  26860. ++ [ATH10K_TM_ATTR_WMI_CMDID] = { .type = NLA_U32 },
  26861. ++ [ATH10K_TM_ATTR_VERSION_MAJOR] = { .type = NLA_U32 },
  26862. ++ [ATH10K_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 },
  26863. ++};
  26864. ++
  26865. ++/* Returns true if callee consumes the skb and the skb should be discarded.
  26866. ++ * Returns false if skb is not used. Does not sleep.
  26867. ++ */
  26868. ++bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
  26869. ++{
  26870. ++ struct sk_buff *nl_skb;
  26871. ++ bool consumed;
  26872. ++ int ret;
  26873. ++
  26874. ++ ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
  26875. ++ "testmode event wmi cmd_id %d skb %p skb->len %d\n",
  26876. ++ cmd_id, skb, skb->len);
  26877. ++
  26878. ++ ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
  26879. ++
  26880. ++ spin_lock_bh(&ar->data_lock);
  26881. ++
  26882. ++ if (!ar->testmode.utf_monitor) {
  26883. ++ consumed = false;
  26884. ++ goto out;
  26885. ++ }
  26886. ++
  26887. ++ /* Only testmode.c should be handling events from utf firmware,
  26888. ++ * otherwise all sort of problems will arise as mac80211 operations
  26889. ++ * are not initialised.
  26890. ++ */
  26891. ++ consumed = true;
  26892. ++
  26893. ++ nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy,
  26894. ++ 2 * sizeof(u32) + skb->len,
  26895. ++ GFP_ATOMIC);
  26896. ++ if (!nl_skb) {
  26897. ++ ath10k_warn(ar,
  26898. ++ "failed to allocate skb for testmode wmi event\n");
  26899. ++ goto out;
  26900. ++ }
  26901. ++
  26902. ++ ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_CMD, ATH10K_TM_CMD_WMI);
  26903. ++ if (ret) {
  26904. ++ ath10k_warn(ar,
  26905. ++ "failed to to put testmode wmi event cmd attribute: %d\n",
  26906. ++ ret);
  26907. ++ kfree_skb(nl_skb);
  26908. ++ goto out;
  26909. ++ }
  26910. ++
  26911. ++ ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_WMI_CMDID, cmd_id);
  26912. ++ if (ret) {
  26913. ++ ath10k_warn(ar,
  26914. ++ "failed to to put testmode wmi even cmd_id: %d\n",
  26915. ++ ret);
  26916. ++ kfree_skb(nl_skb);
  26917. ++ goto out;
  26918. ++ }
  26919. ++
  26920. ++ ret = nla_put(nl_skb, ATH10K_TM_ATTR_DATA, skb->len, skb->data);
  26921. ++ if (ret) {
  26922. ++ ath10k_warn(ar,
  26923. ++ "failed to copy skb to testmode wmi event: %d\n",
  26924. ++ ret);
  26925. ++ kfree_skb(nl_skb);
  26926. ++ goto out;
  26927. ++ }
  26928. ++
  26929. ++ cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
  26930. ++
  26931. ++out:
  26932. ++ spin_unlock_bh(&ar->data_lock);
  26933. ++
  26934. ++ return consumed;
  26935. ++}
  26936. ++
  26937. ++static int ath10k_tm_cmd_get_version(struct ath10k *ar, struct nlattr *tb[])
  26938. ++{
  26939. ++ struct sk_buff *skb;
  26940. ++ int ret;
  26941. ++
  26942. ++ ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
  26943. ++ "testmode cmd get version_major %d version_minor %d\n",
  26944. ++ ATH10K_TESTMODE_VERSION_MAJOR,
  26945. ++ ATH10K_TESTMODE_VERSION_MINOR);
  26946. ++
  26947. ++ skb = cfg80211_testmode_alloc_reply_skb(ar->hw->wiphy,
  26948. ++ nla_total_size(sizeof(u32)));
  26949. ++ if (!skb)
  26950. ++ return -ENOMEM;
  26951. ++
  26952. ++ ret = nla_put_u32(skb, ATH10K_TM_ATTR_VERSION_MAJOR,
  26953. ++ ATH10K_TESTMODE_VERSION_MAJOR);
  26954. ++ if (ret) {
  26955. ++ kfree_skb(skb);
  26956. ++ return ret;
  26957. ++ }
  26958. ++
  26959. ++ ret = nla_put_u32(skb, ATH10K_TM_ATTR_VERSION_MINOR,
  26960. ++ ATH10K_TESTMODE_VERSION_MINOR);
  26961. ++ if (ret) {
  26962. ++ kfree_skb(skb);
  26963. ++ return ret;
  26964. ++ }
  26965. ++
  26966. ++ return cfg80211_testmode_reply(skb);
  26967. ++}
  26968. ++
  26969. ++static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
  26970. ++{
  26971. ++ char filename[100];
  26972. ++ int ret;
  26973. ++
  26974. ++ ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode cmd utf start\n");
  26975. ++
  26976. ++ mutex_lock(&ar->conf_mutex);
  26977. ++
  26978. ++ if (ar->state == ATH10K_STATE_UTF) {
  26979. ++ ret = -EALREADY;
  26980. ++ goto err;
  26981. ++ }
  26982. ++
  26983. ++ /* start utf only when the driver is not in use */
  26984. ++ if (ar->state != ATH10K_STATE_OFF) {
  26985. ++ ret = -EBUSY;
  26986. ++ goto err;
  26987. ++ }
  26988. ++
  26989. ++ if (WARN_ON(ar->testmode.utf != NULL)) {
  26990. ++ /* utf image is already downloaded, it shouldn't be */
  26991. ++ ret = -EEXIST;
  26992. ++ goto err;
  26993. ++ }
  26994. ++
  26995. ++ snprintf(filename, sizeof(filename), "%s/%s",
  26996. ++ ar->hw_params.fw.dir, ATH10K_FW_UTF_FILE);
  26997. ++
  26998. ++ /* load utf firmware image */
  26999. ++ ret = request_firmware(&ar->testmode.utf, filename, ar->dev);
  27000. ++ if (ret) {
  27001. ++ ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n",
  27002. ++ filename, ret);
  27003. ++ goto err;
  27004. ++ }
  27005. ++
  27006. ++ spin_lock_bh(&ar->data_lock);
  27007. ++
  27008. ++ ar->testmode.utf_monitor = true;
  27009. ++
  27010. ++ spin_unlock_bh(&ar->data_lock);
  27011. ++
  27012. ++ BUILD_BUG_ON(sizeof(ar->fw_features) !=
  27013. ++ sizeof(ar->testmode.orig_fw_features));
  27014. ++
  27015. ++ memcpy(ar->testmode.orig_fw_features, ar->fw_features,
  27016. ++ sizeof(ar->fw_features));
  27017. ++ ar->testmode.orig_wmi_op_version = ar->wmi.op_version;
  27018. ++
  27019. ++ /* utf.bin firmware image does not advertise firmware features. Do
  27020. ++ * an ugly hack where we force the firmware features so that wmi.c
  27021. ++ * will use the correct WMI interface.
  27022. ++ */
  27023. ++ memset(ar->fw_features, 0, sizeof(ar->fw_features));
  27024. ++ ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
  27025. ++
  27026. ++ ret = ath10k_hif_power_up(ar);
  27027. ++ if (ret) {
  27028. ++ ath10k_err(ar, "failed to power up hif (testmode): %d\n", ret);
  27029. ++ ar->state = ATH10K_STATE_OFF;
  27030. ++ goto err_fw_features;
  27031. ++ }
  27032. ++
  27033. ++ ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_UTF);
  27034. ++ if (ret) {
  27035. ++ ath10k_err(ar, "failed to start core (testmode): %d\n", ret);
  27036. ++ ar->state = ATH10K_STATE_OFF;
  27037. ++ goto err_power_down;
  27038. ++ }
  27039. ++
  27040. ++ ar->state = ATH10K_STATE_UTF;
  27041. ++
  27042. ++ ath10k_info(ar, "UTF firmware started\n");
  27043. ++
  27044. ++ mutex_unlock(&ar->conf_mutex);
  27045. ++
  27046. ++ return 0;
  27047. ++
  27048. ++err_power_down:
  27049. ++ ath10k_hif_power_down(ar);
  27050. ++
  27051. ++err_fw_features:
  27052. ++ /* return the original firmware features */
  27053. ++ memcpy(ar->fw_features, ar->testmode.orig_fw_features,
  27054. ++ sizeof(ar->fw_features));
  27055. ++ ar->wmi.op_version = ar->testmode.orig_wmi_op_version;
  27056. ++
  27057. ++ release_firmware(ar->testmode.utf);
  27058. ++ ar->testmode.utf = NULL;
  27059. ++
  27060. ++err:
  27061. ++ mutex_unlock(&ar->conf_mutex);
  27062. ++
  27063. ++ return ret;
  27064. ++}
  27065. ++
  27066. ++static void __ath10k_tm_cmd_utf_stop(struct ath10k *ar)
  27067. ++{
  27068. ++ lockdep_assert_held(&ar->conf_mutex);
  27069. ++
  27070. ++ ath10k_core_stop(ar);
  27071. ++ ath10k_hif_power_down(ar);
  27072. ++
  27073. ++ spin_lock_bh(&ar->data_lock);
  27074. ++
  27075. ++ ar->testmode.utf_monitor = false;
  27076. ++
  27077. ++ spin_unlock_bh(&ar->data_lock);
  27078. ++
  27079. ++ /* return the original firmware features */
  27080. ++ memcpy(ar->fw_features, ar->testmode.orig_fw_features,
  27081. ++ sizeof(ar->fw_features));
  27082. ++ ar->wmi.op_version = ar->testmode.orig_wmi_op_version;
  27083. ++
  27084. ++ release_firmware(ar->testmode.utf);
  27085. ++ ar->testmode.utf = NULL;
  27086. ++
  27087. ++ ar->state = ATH10K_STATE_OFF;
  27088. ++}
  27089. ++
  27090. ++static int ath10k_tm_cmd_utf_stop(struct ath10k *ar, struct nlattr *tb[])
  27091. ++{
  27092. ++ int ret;
  27093. ++
  27094. ++ ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode cmd utf stop\n");
  27095. ++
  27096. ++ mutex_lock(&ar->conf_mutex);
  27097. ++
  27098. ++ if (ar->state != ATH10K_STATE_UTF) {
  27099. ++ ret = -ENETDOWN;
  27100. ++ goto out;
  27101. ++ }
  27102. ++
  27103. ++ __ath10k_tm_cmd_utf_stop(ar);
  27104. ++
  27105. ++ ret = 0;
  27106. ++
  27107. ++ ath10k_info(ar, "UTF firmware stopped\n");
  27108. ++
  27109. ++out:
  27110. ++ mutex_unlock(&ar->conf_mutex);
  27111. ++ return ret;
  27112. ++}
  27113. ++
  27114. ++static int ath10k_tm_cmd_wmi(struct ath10k *ar, struct nlattr *tb[])
  27115. ++{
  27116. ++ struct sk_buff *skb;
  27117. ++ int ret, buf_len;
  27118. ++ u32 cmd_id;
  27119. ++ void *buf;
  27120. ++
  27121. ++ mutex_lock(&ar->conf_mutex);
  27122. ++
  27123. ++ if (ar->state != ATH10K_STATE_UTF) {
  27124. ++ ret = -ENETDOWN;
  27125. ++ goto out;
  27126. ++ }
  27127. ++
  27128. ++ if (!tb[ATH10K_TM_ATTR_DATA]) {
  27129. ++ ret = -EINVAL;
  27130. ++ goto out;
  27131. ++ }
  27132. ++
  27133. ++ if (!tb[ATH10K_TM_ATTR_WMI_CMDID]) {
  27134. ++ ret = -EINVAL;
  27135. ++ goto out;
  27136. ++ }
  27137. ++
  27138. ++ buf = nla_data(tb[ATH10K_TM_ATTR_DATA]);
  27139. ++ buf_len = nla_len(tb[ATH10K_TM_ATTR_DATA]);
  27140. ++ cmd_id = nla_get_u32(tb[ATH10K_TM_ATTR_WMI_CMDID]);
  27141. ++
  27142. ++ ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
  27143. ++ "testmode cmd wmi cmd_id %d buf %p buf_len %d\n",
  27144. ++ cmd_id, buf, buf_len);
  27145. ++
  27146. ++ ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", buf, buf_len);
  27147. ++
  27148. ++ skb = ath10k_wmi_alloc_skb(ar, buf_len);
  27149. ++ if (!skb) {
  27150. ++ ret = -ENOMEM;
  27151. ++ goto out;
  27152. ++ }
  27153. ++
  27154. ++ memcpy(skb->data, buf, buf_len);
  27155. ++
  27156. ++ ret = ath10k_wmi_cmd_send(ar, skb, cmd_id);
  27157. ++ if (ret) {
  27158. ++ ath10k_warn(ar, "failed to transmit wmi command (testmode): %d\n",
  27159. ++ ret);
  27160. ++ goto out;
  27161. ++ }
  27162. ++
  27163. ++ ret = 0;
  27164. ++
  27165. ++out:
  27166. ++ mutex_unlock(&ar->conf_mutex);
  27167. ++ return ret;
  27168. ++}
  27169. ++
  27170. ++int ath10k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
  27171. ++ void *data, int len)
  27172. ++{
  27173. ++ struct ath10k *ar = hw->priv;
  27174. ++ struct nlattr *tb[ATH10K_TM_ATTR_MAX + 1];
  27175. ++ int ret;
  27176. ++
  27177. ++ ret = nla_parse(tb, ATH10K_TM_ATTR_MAX, data, len,
  27178. ++ ath10k_tm_policy);
  27179. ++ if (ret)
  27180. ++ return ret;
  27181. ++
  27182. ++ if (!tb[ATH10K_TM_ATTR_CMD])
  27183. ++ return -EINVAL;
  27184. ++
  27185. ++ switch (nla_get_u32(tb[ATH10K_TM_ATTR_CMD])) {
  27186. ++ case ATH10K_TM_CMD_GET_VERSION:
  27187. ++ return ath10k_tm_cmd_get_version(ar, tb);
  27188. ++ case ATH10K_TM_CMD_UTF_START:
  27189. ++ return ath10k_tm_cmd_utf_start(ar, tb);
  27190. ++ case ATH10K_TM_CMD_UTF_STOP:
  27191. ++ return ath10k_tm_cmd_utf_stop(ar, tb);
  27192. ++ case ATH10K_TM_CMD_WMI:
  27193. ++ return ath10k_tm_cmd_wmi(ar, tb);
  27194. ++ default:
  27195. ++ return -EOPNOTSUPP;
  27196. ++ }
  27197. ++}
  27198. ++
  27199. ++void ath10k_testmode_destroy(struct ath10k *ar)
  27200. ++{
  27201. ++ mutex_lock(&ar->conf_mutex);
  27202. ++
  27203. ++ if (ar->state != ATH10K_STATE_UTF) {
  27204. ++ /* utf firmware is not running, nothing to do */
  27205. ++ goto out;
  27206. ++ }
  27207. ++
  27208. ++ __ath10k_tm_cmd_utf_stop(ar);
  27209. ++
  27210. ++out:
  27211. ++ mutex_unlock(&ar->conf_mutex);
  27212. ++}
  27213. +--- /dev/null
  27214. ++++ b/drivers/net/wireless/ath/ath10k/testmode.h
  27215. +@@ -0,0 +1,46 @@
  27216. ++/*
  27217. ++ * Copyright (c) 2014 Qualcomm Atheros, Inc.
  27218. ++ *
  27219. ++ * Permission to use, copy, modify, and/or distribute this software for any
  27220. ++ * purpose with or without fee is hereby granted, provided that the above
  27221. ++ * copyright notice and this permission notice appear in all copies.
  27222. ++ *
  27223. ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  27224. ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  27225. ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  27226. ++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  27227. ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  27228. ++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  27229. ++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  27230. ++ */
  27231. ++
  27232. ++#include "core.h"
  27233. ++
  27234. ++#ifdef CPTCFG_NL80211_TESTMODE
  27235. ++
  27236. ++void ath10k_testmode_destroy(struct ath10k *ar);
  27237. ++
  27238. ++bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb);
  27239. ++int ath10k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
  27240. ++ void *data, int len);
  27241. ++
  27242. ++#else
  27243. ++
  27244. ++static inline void ath10k_testmode_destroy(struct ath10k *ar)
  27245. ++{
  27246. ++}
  27247. ++
  27248. ++static inline bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id,
  27249. ++ struct sk_buff *skb)
  27250. ++{
  27251. ++ return false;
  27252. ++}
  27253. ++
  27254. ++static inline int ath10k_tm_cmd(struct ieee80211_hw *hw,
  27255. ++ struct ieee80211_vif *vif,
  27256. ++ void *data, int len)
  27257. ++{
  27258. ++ return 0;
  27259. ++}
  27260. ++
  27261. ++#endif
  27262. +--- /dev/null
  27263. ++++ b/drivers/net/wireless/ath/ath10k/testmode_i.h
  27264. +@@ -0,0 +1,70 @@
  27265. ++/*
  27266. ++ * Copyright (c) 2014 Qualcomm Atheros, Inc.
  27267. ++ *
  27268. ++ * Permission to use, copy, modify, and/or distribute this software for any
  27269. ++ * purpose with or without fee is hereby granted, provided that the above
  27270. ++ * copyright notice and this permission notice appear in all copies.
  27271. ++ *
  27272. ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  27273. ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  27274. ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  27275. ++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  27276. ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  27277. ++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  27278. ++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  27279. ++ */
  27280. ++
  27281. ++/* "API" level of the ath10k testmode interface. Bump it after every
  27282. ++ * incompatible interface change.
  27283. ++ */
  27284. ++#define ATH10K_TESTMODE_VERSION_MAJOR 1
  27285. ++
  27286. ++/* Bump this after every _compatible_ interface change, for example
  27287. ++ * addition of a new command or an attribute.
  27288. ++ */
  27289. ++#define ATH10K_TESTMODE_VERSION_MINOR 0
  27290. ++
  27291. ++#define ATH10K_TM_DATA_MAX_LEN 5000
  27292. ++
  27293. ++enum ath10k_tm_attr {
  27294. ++ __ATH10K_TM_ATTR_INVALID = 0,
  27295. ++ ATH10K_TM_ATTR_CMD = 1,
  27296. ++ ATH10K_TM_ATTR_DATA = 2,
  27297. ++ ATH10K_TM_ATTR_WMI_CMDID = 3,
  27298. ++ ATH10K_TM_ATTR_VERSION_MAJOR = 4,
  27299. ++ ATH10K_TM_ATTR_VERSION_MINOR = 5,
  27300. ++
  27301. ++ /* keep last */
  27302. ++ __ATH10K_TM_ATTR_AFTER_LAST,
  27303. ++ ATH10K_TM_ATTR_MAX = __ATH10K_TM_ATTR_AFTER_LAST - 1,
  27304. ++};
  27305. ++
  27306. ++/* All ath10k testmode interface commands specified in
  27307. ++ * ATH10K_TM_ATTR_CMD
  27308. ++ */
  27309. ++enum ath10k_tm_cmd {
  27310. ++ /* Returns the supported ath10k testmode interface version in
  27311. ++ * ATH10K_TM_ATTR_VERSION. Always guaranteed to work. User space
  27312. ++ * uses this to verify it's using the correct version of the
  27313. ++ * testmode interface
  27314. ++ */
  27315. ++ ATH10K_TM_CMD_GET_VERSION = 0,
  27316. ++
  27317. ++ /* Boots the UTF firmware, the netdev interface must be down at the
  27318. ++ * time.
  27319. ++ */
  27320. ++ ATH10K_TM_CMD_UTF_START = 1,
  27321. ++
  27322. ++ /* Shuts down the UTF firmware and puts the driver back into OFF
  27323. ++ * state.
  27324. ++ */
  27325. ++ ATH10K_TM_CMD_UTF_STOP = 2,
  27326. ++
  27327. ++ /* The command used to transmit a WMI command to the firmware and
  27328. ++ * the event to receive WMI events from the firmware. Without
  27329. ++ * struct wmi_cmd_hdr header, only the WMI payload. Command id is
  27330. ++ * provided with ATH10K_TM_ATTR_WMI_CMDID and payload in
  27331. ++ * ATH10K_TM_ATTR_DATA.
  27332. ++ */
  27333. ++ ATH10K_TM_CMD_WMI = 3,
  27334. ++};
  27335. +--- /dev/null
  27336. ++++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
  27337. +@@ -0,0 +1,243 @@
  27338. ++/*
  27339. ++ * Copyright (c) 2014 Qualcomm Atheros, Inc.
  27340. ++ *
  27341. ++ * Permission to use, copy, modify, and/or distribute this software for any
  27342. ++ * purpose with or without fee is hereby granted, provided that the above
  27343. ++ * copyright notice and this permission notice appear in all copies.
  27344. ++ *
  27345. ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  27346. ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  27347. ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  27348. ++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  27349. ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  27350. ++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  27351. ++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  27352. ++ */
  27353. ++
  27354. ++#include "core.h"
  27355. ++#include "wmi-ops.h"
  27356. ++#include "debug.h"
  27357. ++
  27358. ++static ssize_t ath10k_dbg_sta_read_aggr_mode(struct file *file,
  27359. ++ char __user *user_buf,
  27360. ++ size_t count, loff_t *ppos)
  27361. ++{
  27362. ++ struct ieee80211_sta *sta = file->private_data;
  27363. ++ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
  27364. ++ struct ath10k *ar = arsta->arvif->ar;
  27365. ++ char buf[32];
  27366. ++ int len = 0;
  27367. ++
  27368. ++ mutex_lock(&ar->conf_mutex);
  27369. ++ len = scnprintf(buf, sizeof(buf) - len, "aggregation mode: %s\n",
  27370. ++ (arsta->aggr_mode == ATH10K_DBG_AGGR_MODE_AUTO) ?
  27371. ++ "auto" : "manual");
  27372. ++ mutex_unlock(&ar->conf_mutex);
  27373. ++
  27374. ++ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
  27375. ++}
  27376. ++
  27377. ++static ssize_t ath10k_dbg_sta_write_aggr_mode(struct file *file,
  27378. ++ const char __user *user_buf,
  27379. ++ size_t count, loff_t *ppos)
  27380. ++{
  27381. ++ struct ieee80211_sta *sta = file->private_data;
  27382. ++ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
  27383. ++ struct ath10k *ar = arsta->arvif->ar;
  27384. ++ u32 aggr_mode;
  27385. ++ int ret;
  27386. ++
  27387. ++ if (kstrtouint_from_user(user_buf, count, 0, &aggr_mode))
  27388. ++ return -EINVAL;
  27389. ++
  27390. ++ if (aggr_mode >= ATH10K_DBG_AGGR_MODE_MAX)
  27391. ++ return -EINVAL;
  27392. ++
  27393. ++ mutex_lock(&ar->conf_mutex);
  27394. ++ if ((ar->state != ATH10K_STATE_ON) ||
  27395. ++ (aggr_mode == arsta->aggr_mode)) {
  27396. ++ ret = count;
  27397. ++ goto out;
  27398. ++ }
  27399. ++
  27400. ++ ret = ath10k_wmi_addba_clear_resp(ar, arsta->arvif->vdev_id, sta->addr);
  27401. ++ if (ret) {
  27402. ++ ath10k_warn(ar, "failed to clear addba session ret: %d\n", ret);
  27403. ++ goto out;
  27404. ++ }
  27405. ++
  27406. ++ arsta->aggr_mode = aggr_mode;
  27407. ++out:
  27408. ++ mutex_unlock(&ar->conf_mutex);
  27409. ++ return ret;
  27410. ++}
  27411. ++
  27412. ++static const struct file_operations fops_aggr_mode = {
  27413. ++ .read = ath10k_dbg_sta_read_aggr_mode,
  27414. ++ .write = ath10k_dbg_sta_write_aggr_mode,
  27415. ++ .open = simple_open,
  27416. ++ .owner = THIS_MODULE,
  27417. ++ .llseek = default_llseek,
  27418. ++};
  27419. ++
  27420. ++static ssize_t ath10k_dbg_sta_write_addba(struct file *file,
  27421. ++ const char __user *user_buf,
  27422. ++ size_t count, loff_t *ppos)
  27423. ++{
  27424. ++ struct ieee80211_sta *sta = file->private_data;
  27425. ++ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
  27426. ++ struct ath10k *ar = arsta->arvif->ar;
  27427. ++ u32 tid, buf_size;
  27428. ++ int ret;
  27429. ++ char buf[64];
  27430. ++
  27431. ++ simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
  27432. ++
  27433. ++ /* make sure that buf is null terminated */
  27434. ++ buf[sizeof(buf) - 1] = '\0';
  27435. ++
  27436. ++ ret = sscanf(buf, "%u %u", &tid, &buf_size);
  27437. ++ if (ret != 2)
  27438. ++ return -EINVAL;
  27439. ++
  27440. ++ /* Valid TID values are 0 through 15 */
  27441. ++ if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
  27442. ++ return -EINVAL;
  27443. ++
  27444. ++ mutex_lock(&ar->conf_mutex);
  27445. ++ if ((ar->state != ATH10K_STATE_ON) ||
  27446. ++ (arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
  27447. ++ ret = count;
  27448. ++ goto out;
  27449. ++ }
  27450. ++
  27451. ++ ret = ath10k_wmi_addba_send(ar, arsta->arvif->vdev_id, sta->addr,
  27452. ++ tid, buf_size);
  27453. ++ if (ret) {
  27454. ++ ath10k_warn(ar, "failed to send addba request: vdev_id %u peer %pM tid %u buf_size %u\n",
  27455. ++ arsta->arvif->vdev_id, sta->addr, tid, buf_size);
  27456. ++ }
  27457. ++
  27458. ++ ret = count;
  27459. ++out:
  27460. ++ mutex_unlock(&ar->conf_mutex);
  27461. ++ return ret;
  27462. ++}
  27463. ++
  27464. ++static const struct file_operations fops_addba = {
  27465. ++ .write = ath10k_dbg_sta_write_addba,
  27466. ++ .open = simple_open,
  27467. ++ .owner = THIS_MODULE,
  27468. ++ .llseek = default_llseek,
  27469. ++};
  27470. ++
  27471. ++static ssize_t ath10k_dbg_sta_write_addba_resp(struct file *file,
  27472. ++ const char __user *user_buf,
  27473. ++ size_t count, loff_t *ppos)
  27474. ++{
  27475. ++ struct ieee80211_sta *sta = file->private_data;
  27476. ++ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
  27477. ++ struct ath10k *ar = arsta->arvif->ar;
  27478. ++ u32 tid, status;
  27479. ++ int ret;
  27480. ++ char buf[64];
  27481. ++
  27482. ++ simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
  27483. ++
  27484. ++ /* make sure that buf is null terminated */
  27485. ++ buf[sizeof(buf) - 1] = '\0';
  27486. ++
  27487. ++ ret = sscanf(buf, "%u %u", &tid, &status);
  27488. ++ if (ret != 2)
  27489. ++ return -EINVAL;
  27490. ++
  27491. ++ /* Valid TID values are 0 through 15 */
  27492. ++ if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
  27493. ++ return -EINVAL;
  27494. ++
  27495. ++ mutex_lock(&ar->conf_mutex);
  27496. ++ if ((ar->state != ATH10K_STATE_ON) ||
  27497. ++ (arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
  27498. ++ ret = count;
  27499. ++ goto out;
  27500. ++ }
  27501. ++
  27502. ++ ret = ath10k_wmi_addba_set_resp(ar, arsta->arvif->vdev_id, sta->addr,
  27503. ++ tid, status);
  27504. ++ if (ret) {
  27505. ++ ath10k_warn(ar, "failed to send addba response: vdev_id %u peer %pM tid %u status %u\n",
  27506. ++ arsta->arvif->vdev_id, sta->addr, tid, status);
  27507. ++ }
  27508. ++ ret = count;
  27509. ++out:
  27510. ++ mutex_unlock(&ar->conf_mutex);
  27511. ++ return ret;
  27512. ++}
  27513. ++
  27514. ++static const struct file_operations fops_addba_resp = {
  27515. ++ .write = ath10k_dbg_sta_write_addba_resp,
  27516. ++ .open = simple_open,
  27517. ++ .owner = THIS_MODULE,
  27518. ++ .llseek = default_llseek,
  27519. ++};
  27520. ++
  27521. ++static ssize_t ath10k_dbg_sta_write_delba(struct file *file,
  27522. ++ const char __user *user_buf,
  27523. ++ size_t count, loff_t *ppos)
  27524. ++{
  27525. ++ struct ieee80211_sta *sta = file->private_data;
  27526. ++ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
  27527. ++ struct ath10k *ar = arsta->arvif->ar;
  27528. ++ u32 tid, initiator, reason;
  27529. ++ int ret;
  27530. ++ char buf[64];
  27531. ++
  27532. ++ simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
  27533. ++
  27534. ++ /* make sure that buf is null terminated */
  27535. ++ buf[sizeof(buf) - 1] = '\0';
  27536. ++
  27537. ++ ret = sscanf(buf, "%u %u %u", &tid, &initiator, &reason);
  27538. ++ if (ret != 3)
  27539. ++ return -EINVAL;
  27540. ++
  27541. ++ /* Valid TID values are 0 through 15 */
  27542. ++ if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
  27543. ++ return -EINVAL;
  27544. ++
  27545. ++ mutex_lock(&ar->conf_mutex);
  27546. ++ if ((ar->state != ATH10K_STATE_ON) ||
  27547. ++ (arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
  27548. ++ ret = count;
  27549. ++ goto out;
  27550. ++ }
  27551. ++
  27552. ++ ret = ath10k_wmi_delba_send(ar, arsta->arvif->vdev_id, sta->addr,
  27553. ++ tid, initiator, reason);
  27554. ++ if (ret) {
  27555. ++ ath10k_warn(ar, "failed to send delba: vdev_id %u peer %pM tid %u initiator %u reason %u\n",
  27556. ++ arsta->arvif->vdev_id, sta->addr, tid, initiator,
  27557. ++ reason);
  27558. ++ }
  27559. ++ ret = count;
  27560. ++out:
  27561. ++ mutex_unlock(&ar->conf_mutex);
  27562. ++ return ret;
  27563. ++}
  27564. ++
  27565. ++static const struct file_operations fops_delba = {
  27566. ++ .write = ath10k_dbg_sta_write_delba,
  27567. ++ .open = simple_open,
  27568. ++ .owner = THIS_MODULE,
  27569. ++ .llseek = default_llseek,
  27570. ++};
  27571. ++
  27572. ++void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
  27573. ++ struct ieee80211_sta *sta, struct dentry *dir)
  27574. ++{
  27575. ++ debugfs_create_file("aggr_mode", S_IRUGO | S_IWUSR, dir, sta,
  27576. ++ &fops_aggr_mode);
  27577. ++ debugfs_create_file("addba", S_IWUSR, dir, sta, &fops_addba);
  27578. ++ debugfs_create_file("addba_resp", S_IWUSR, dir, sta, &fops_addba_resp);
  27579. ++ debugfs_create_file("delba", S_IWUSR, dir, sta, &fops_delba);
  27580. ++}
  27581. +--- /dev/null
  27582. ++++ b/drivers/net/wireless/ath/ath10k/hw.c
  27583. +@@ -0,0 +1,58 @@
  27584. ++/*
  27585. ++ * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
  27586. ++ *
  27587. ++ * Permission to use, copy, modify, and/or distribute this software for any
  27588. ++ * purpose with or without fee is hereby granted, provided that the above
  27589. ++ * copyright notice and this permission notice appear in all copies.
  27590. ++ *
  27591. ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  27592. ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  27593. ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  27594. ++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  27595. ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  27596. ++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  27597. ++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  27598. ++ */
  27599. ++
  27600. ++#include <linux/types.h>
  27601. ++#include "hw.h"
  27602. ++
  27603. ++const struct ath10k_hw_regs qca988x_regs = {
  27604. ++ .rtc_state_cold_reset_mask = 0x00000400,
  27605. ++ .rtc_soc_base_address = 0x00004000,
  27606. ++ .rtc_wmac_base_address = 0x00005000,
  27607. ++ .soc_core_base_address = 0x00009000,
  27608. ++ .ce_wrapper_base_address = 0x00057000,
  27609. ++ .ce0_base_address = 0x00057400,
  27610. ++ .ce1_base_address = 0x00057800,
  27611. ++ .ce2_base_address = 0x00057c00,
  27612. ++ .ce3_base_address = 0x00058000,
  27613. ++ .ce4_base_address = 0x00058400,
  27614. ++ .ce5_base_address = 0x00058800,
  27615. ++ .ce6_base_address = 0x00058c00,
  27616. ++ .ce7_base_address = 0x00059000,
  27617. ++ .soc_reset_control_si0_rst_mask = 0x00000001,
  27618. ++ .soc_reset_control_ce_rst_mask = 0x00040000,
  27619. ++ .soc_chip_id_address = 0x00ec,
  27620. ++ .scratch_3_address = 0x0030,
  27621. ++};
  27622. ++
  27623. ++const struct ath10k_hw_regs qca6174_regs = {
  27624. ++ .rtc_state_cold_reset_mask = 0x00002000,
  27625. ++ .rtc_soc_base_address = 0x00000800,
  27626. ++ .rtc_wmac_base_address = 0x00001000,
  27627. ++ .soc_core_base_address = 0x0003a000,
  27628. ++ .ce_wrapper_base_address = 0x00034000,
  27629. ++ .ce0_base_address = 0x00034400,
  27630. ++ .ce1_base_address = 0x00034800,
  27631. ++ .ce2_base_address = 0x00034c00,
  27632. ++ .ce3_base_address = 0x00035000,
  27633. ++ .ce4_base_address = 0x00035400,
  27634. ++ .ce5_base_address = 0x00035800,
  27635. ++ .ce6_base_address = 0x00035c00,
  27636. ++ .ce7_base_address = 0x00036000,
  27637. ++ .soc_reset_control_si0_rst_mask = 0x00000000,
  27638. ++ .soc_reset_control_ce_rst_mask = 0x00000001,
  27639. ++ .soc_chip_id_address = 0x000f0,
  27640. ++ .scratch_3_address = 0x0028,
  27641. ++};
  27642. +--- /dev/null
  27643. ++++ b/drivers/net/wireless/ath/ath10k/thermal.c
  27644. +@@ -0,0 +1,244 @@
  27645. ++/*
  27646. ++ * Copyright (c) 2014 Qualcomm Atheros, Inc.
  27647. ++ *
  27648. ++ * Permission to use, copy, modify, and/or distribute this software for any
  27649. ++ * purpose with or without fee is hereby granted, provided that the above
  27650. ++ * copyright notice and this permission notice appear in all copies.
  27651. ++ *
  27652. ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  27653. ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  27654. ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  27655. ++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  27656. ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  27657. ++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  27658. ++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  27659. ++ */
  27660. ++
  27661. ++#include <linux/device.h>
  27662. ++#include <linux/sysfs.h>
  27663. ++#include <linux/thermal.h>
  27664. ++#include <linux/hwmon.h>
  27665. ++#include <linux/hwmon-sysfs.h>
  27666. ++#include "core.h"
  27667. ++#include "debug.h"
  27668. ++#include "wmi-ops.h"
  27669. ++
  27670. ++static int ath10k_thermal_get_active_vifs(struct ath10k *ar,
  27671. ++ enum wmi_vdev_type type)
  27672. ++{
  27673. ++ struct ath10k_vif *arvif;
  27674. ++ int count = 0;
  27675. ++
  27676. ++ lockdep_assert_held(&ar->conf_mutex);
  27677. ++
  27678. ++ list_for_each_entry(arvif, &ar->arvifs, list) {
  27679. ++ if (!arvif->is_started)
  27680. ++ continue;
  27681. ++
  27682. ++ if (!arvif->is_up)
  27683. ++ continue;
  27684. ++
  27685. ++ if (arvif->vdev_type != type)
  27686. ++ continue;
  27687. ++
  27688. ++ count++;
  27689. ++ }
  27690. ++ return count;
  27691. ++}
  27692. ++
  27693. ++static int ath10k_thermal_get_max_dutycycle(struct thermal_cooling_device *cdev,
  27694. ++ unsigned long *state)
  27695. ++{
  27696. ++ *state = ATH10K_QUIET_DUTY_CYCLE_MAX;
  27697. ++
  27698. ++ return 0;
  27699. ++}
  27700. ++
  27701. ++static int ath10k_thermal_get_cur_dutycycle(struct thermal_cooling_device *cdev,
  27702. ++ unsigned long *state)
  27703. ++{
  27704. ++ struct ath10k *ar = cdev->devdata;
  27705. ++
  27706. ++ mutex_lock(&ar->conf_mutex);
  27707. ++ *state = ar->thermal.duty_cycle;
  27708. ++ mutex_unlock(&ar->conf_mutex);
  27709. ++
  27710. ++ return 0;
  27711. ++}
  27712. ++
  27713. ++static int ath10k_thermal_set_cur_dutycycle(struct thermal_cooling_device *cdev,
  27714. ++ unsigned long duty_cycle)
  27715. ++{
  27716. ++ struct ath10k *ar = cdev->devdata;
  27717. ++ u32 period, duration, enabled;
  27718. ++ int num_bss, ret = 0;
  27719. ++
  27720. ++ mutex_lock(&ar->conf_mutex);
  27721. ++ if (ar->state != ATH10K_STATE_ON) {
  27722. ++ ret = -ENETDOWN;
  27723. ++ goto out;
  27724. ++ }
  27725. ++
  27726. ++ if (duty_cycle > ATH10K_QUIET_DUTY_CYCLE_MAX) {
  27727. ++ ath10k_warn(ar, "duty cycle %ld is exceeding the limit %d\n",
  27728. ++ duty_cycle, ATH10K_QUIET_DUTY_CYCLE_MAX);
  27729. ++ ret = -EINVAL;
  27730. ++ goto out;
  27731. ++ }
  27732. ++ /* TODO: Right now, thermal mitigation is handled only for single/multi
  27733. ++ * vif AP mode. Since quiet param is not validated in STA mode, it needs
  27734. ++ * to be investigated further to handle multi STA and multi-vif (AP+STA)
  27735. ++ * mode properly.
  27736. ++ */
  27737. ++ num_bss = ath10k_thermal_get_active_vifs(ar, WMI_VDEV_TYPE_AP);
  27738. ++ if (!num_bss) {
  27739. ++ ath10k_warn(ar, "no active AP interfaces\n");
  27740. ++ ret = -ENETDOWN;
  27741. ++ goto out;
  27742. ++ }
  27743. ++ period = max(ATH10K_QUIET_PERIOD_MIN,
  27744. ++ (ATH10K_QUIET_PERIOD_DEFAULT / num_bss));
  27745. ++ duration = (period * duty_cycle) / 100;
  27746. ++ enabled = duration ? 1 : 0;
  27747. ++
  27748. ++ ret = ath10k_wmi_pdev_set_quiet_mode(ar, period, duration,
  27749. ++ ATH10K_QUIET_START_OFFSET,
  27750. ++ enabled);
  27751. ++ if (ret) {
  27752. ++ ath10k_warn(ar, "failed to set quiet mode period %u duration %u enabled %u ret %d\n",
  27753. ++ period, duration, enabled, ret);
  27754. ++ goto out;
  27755. ++ }
  27756. ++ ar->thermal.duty_cycle = duty_cycle;
  27757. ++out:
  27758. ++ mutex_unlock(&ar->conf_mutex);
  27759. ++ return ret;
  27760. ++}
  27761. ++
  27762. ++static struct thermal_cooling_device_ops ath10k_thermal_ops = {
  27763. ++ .get_max_state = ath10k_thermal_get_max_dutycycle,
  27764. ++ .get_cur_state = ath10k_thermal_get_cur_dutycycle,
  27765. ++ .set_cur_state = ath10k_thermal_set_cur_dutycycle,
  27766. ++};
  27767. ++
  27768. ++static ssize_t ath10k_thermal_show_temp(struct device *dev,
  27769. ++ struct device_attribute *attr,
  27770. ++ char *buf)
  27771. ++{
  27772. ++ struct ath10k *ar = dev_get_drvdata(dev);
  27773. ++ int ret, temperature;
  27774. ++
  27775. ++ mutex_lock(&ar->conf_mutex);
  27776. ++
  27777. ++ /* Can't get temperature when the card is off */
  27778. ++ if (ar->state != ATH10K_STATE_ON) {
  27779. ++ ret = -ENETDOWN;
  27780. ++ goto out;
  27781. ++ }
  27782. ++
  27783. ++ reinit_completion(&ar->thermal.wmi_sync);
  27784. ++ ret = ath10k_wmi_pdev_get_temperature(ar);
  27785. ++ if (ret) {
  27786. ++ ath10k_warn(ar, "failed to read temperature %d\n", ret);
  27787. ++ goto out;
  27788. ++ }
  27789. ++
  27790. ++ if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) {
  27791. ++ ret = -ESHUTDOWN;
  27792. ++ goto out;
  27793. ++ }
  27794. ++
  27795. ++ ret = wait_for_completion_timeout(&ar->thermal.wmi_sync,
  27796. ++ ATH10K_THERMAL_SYNC_TIMEOUT_HZ);
  27797. ++ if (ret == 0) {
  27798. ++ ath10k_warn(ar, "failed to synchronize thermal read\n");
  27799. ++ ret = -ETIMEDOUT;
  27800. ++ goto out;
  27801. ++ }
  27802. ++
  27803. ++ spin_lock_bh(&ar->data_lock);
  27804. ++ temperature = ar->thermal.temperature;
  27805. ++ spin_unlock_bh(&ar->data_lock);
  27806. ++
  27807. ++ /* display in millidegree celsius */
  27808. ++ ret = snprintf(buf, PAGE_SIZE, "%d\n", temperature * 1000);
  27809. ++out:
  27810. ++ mutex_unlock(&ar->conf_mutex);
  27811. ++ return ret;
  27812. ++}
  27813. ++
  27814. ++void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature)
  27815. ++{
  27816. ++ spin_lock_bh(&ar->data_lock);
  27817. ++ ar->thermal.temperature = temperature;
  27818. ++ spin_unlock_bh(&ar->data_lock);
  27819. ++ complete(&ar->thermal.wmi_sync);
  27820. ++}
  27821. ++
  27822. ++static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, ath10k_thermal_show_temp,
  27823. ++ NULL, 0);
  27824. ++
  27825. ++static struct attribute *ath10k_hwmon_attrs[] = {
  27826. ++ &sensor_dev_attr_temp1_input.dev_attr.attr,
  27827. ++ NULL,
  27828. ++};
  27829. ++ATTRIBUTE_GROUPS(ath10k_hwmon);
  27830. ++
  27831. ++int ath10k_thermal_register(struct ath10k *ar)
  27832. ++{
  27833. ++ struct thermal_cooling_device *cdev;
  27834. ++ struct device *hwmon_dev;
  27835. ++ int ret;
  27836. ++
  27837. ++ cdev = thermal_cooling_device_register("ath10k_thermal", ar,
  27838. ++ &ath10k_thermal_ops);
  27839. ++
  27840. ++ if (IS_ERR(cdev)) {
  27841. ++ ath10k_err(ar, "failed to setup thermal device result: %ld\n",
  27842. ++ PTR_ERR(cdev));
  27843. ++ return -EINVAL;
  27844. ++ }
  27845. ++
  27846. ++ ret = sysfs_create_link(&ar->dev->kobj, &cdev->device.kobj,
  27847. ++ "cooling_device");
  27848. ++ if (ret) {
  27849. ++ ath10k_err(ar, "failed to create thermal symlink\n");
  27850. ++ goto err_cooling_destroy;
  27851. ++ }
  27852. ++
  27853. ++ ar->thermal.cdev = cdev;
  27854. ++
  27855. ++ /* Do not register hwmon device when temperature reading is not
  27856. ++ * supported by firmware
  27857. ++ */
  27858. ++ if (ar->wmi.op_version != ATH10K_FW_WMI_OP_VERSION_10_2_4)
  27859. ++ return 0;
  27860. ++
  27861. ++ /* Avoid linking error on devm_hwmon_device_register_with_groups, I
  27862. ++ * guess linux/hwmon.h is missing proper stubs. */
  27863. ++ if (!config_enabled(CPTCFG_HWMON))
  27864. ++ return 0;
  27865. ++
  27866. ++ hwmon_dev = devm_hwmon_device_register_with_groups(ar->dev,
  27867. ++ "ath10k_hwmon", ar,
  27868. ++ ath10k_hwmon_groups);
  27869. ++ if (IS_ERR(hwmon_dev)) {
  27870. ++ ath10k_err(ar, "failed to register hwmon device: %ld\n",
  27871. ++ PTR_ERR(hwmon_dev));
  27872. ++ ret = -EINVAL;
  27873. ++ goto err_remove_link;
  27874. ++ }
  27875. ++ return 0;
  27876. ++
  27877. ++err_remove_link:
  27878. ++ sysfs_remove_link(&ar->dev->kobj, "cooling_device");
  27879. ++err_cooling_destroy:
  27880. ++ thermal_cooling_device_unregister(cdev);
  27881. ++ return ret;
  27882. ++}
  27883. ++
  27884. ++void ath10k_thermal_unregister(struct ath10k *ar)
  27885. ++{
  27886. ++ thermal_cooling_device_unregister(ar->thermal.cdev);
  27887. ++ sysfs_remove_link(&ar->dev->kobj, "cooling_device");
  27888. ++}
  27889. +--- /dev/null
  27890. ++++ b/drivers/net/wireless/ath/ath10k/thermal.h
  27891. +@@ -0,0 +1,58 @@
  27892. ++/*
  27893. ++ * Copyright (c) 2014 Qualcomm Atheros, Inc.
  27894. ++ *
  27895. ++ * Permission to use, copy, modify, and/or distribute this software for any
  27896. ++ * purpose with or without fee is hereby granted, provided that the above
  27897. ++ * copyright notice and this permission notice appear in all copies.
  27898. ++ *
  27899. ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  27900. ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  27901. ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  27902. ++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  27903. ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  27904. ++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  27905. ++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  27906. ++ */
  27907. ++#ifndef _THERMAL_
  27908. ++#define _THERMAL_
  27909. ++
  27910. ++#define ATH10K_QUIET_PERIOD_DEFAULT 100
  27911. ++#define ATH10K_QUIET_PERIOD_MIN 25
  27912. ++#define ATH10K_QUIET_START_OFFSET 10
  27913. ++#define ATH10K_QUIET_DUTY_CYCLE_MAX 70
  27914. ++#define ATH10K_HWMON_NAME_LEN 15
  27915. ++#define ATH10K_THERMAL_SYNC_TIMEOUT_HZ (5*HZ)
  27916. ++
  27917. ++struct ath10k_thermal {
  27918. ++ struct thermal_cooling_device *cdev;
  27919. ++ struct completion wmi_sync;
  27920. ++
  27921. ++ /* protected by conf_mutex */
  27922. ++ u32 duty_cycle;
  27923. ++ /* temperature value in Celsius degrees
  27924. ++ * protected by data_lock
  27925. ++ */
  27926. ++ int temperature;
  27927. ++};
  27928. ++
  27929. ++#ifdef CONFIG_THERMAL
  27930. ++int ath10k_thermal_register(struct ath10k *ar);
  27931. ++void ath10k_thermal_unregister(struct ath10k *ar);
  27932. ++void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature);
  27933. ++#else
  27934. ++static inline int ath10k_thermal_register(struct ath10k *ar)
  27935. ++{
  27936. ++ return 0;
  27937. ++}
  27938. ++
  27939. ++static inline void ath10k_thermal_unregister(struct ath10k *ar)
  27940. ++{
  27941. ++}
  27942. ++
  27943. ++static inline void ath10k_thermal_event_temperature(struct ath10k *ar,
  27944. ++ int temperature)
  27945. ++{
  27946. ++}
  27947. ++
  27948. ++#endif
  27949. ++#endif /* _THERMAL_ */
  27950. +--- /dev/null
  27951. ++++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
  27952. +@@ -0,0 +1,1063 @@
  27953. ++/*
  27954. ++ * Copyright (c) 2005-2011 Atheros Communications Inc.
  27955. ++ * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
  27956. ++ *
  27957. ++ * Permission to use, copy, modify, and/or distribute this software for any
  27958. ++ * purpose with or without fee is hereby granted, provided that the above
  27959. ++ * copyright notice and this permission notice appear in all copies.
  27960. ++ *
  27961. ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  27962. ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  27963. ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  27964. ++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  27965. ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  27966. ++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  27967. ++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  27968. ++ */
  27969. ++
  27970. ++#ifndef _WMI_OPS_H_
  27971. ++#define _WMI_OPS_H_
  27972. ++
  27973. ++struct ath10k;
  27974. ++struct sk_buff;
  27975. ++
  27976. ++struct wmi_ops {
  27977. ++ void (*rx)(struct ath10k *ar, struct sk_buff *skb);
  27978. ++ void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
  27979. ++
  27980. ++ int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
  27981. ++ struct wmi_scan_ev_arg *arg);
  27982. ++ int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
  27983. ++ struct wmi_mgmt_rx_ev_arg *arg);
  27984. ++ int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
  27985. ++ struct wmi_ch_info_ev_arg *arg);
  27986. ++ int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
  27987. ++ struct wmi_vdev_start_ev_arg *arg);
  27988. ++ int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
  27989. ++ struct wmi_peer_kick_ev_arg *arg);
  27990. ++ int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
  27991. ++ struct wmi_swba_ev_arg *arg);
  27992. ++ int (*pull_phyerr)(struct ath10k *ar, struct sk_buff *skb,
  27993. ++ struct wmi_phyerr_ev_arg *arg);
  27994. ++ int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
  27995. ++ struct wmi_svc_rdy_ev_arg *arg);
  27996. ++ int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
  27997. ++ struct wmi_rdy_ev_arg *arg);
  27998. ++ int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
  27999. ++ struct ath10k_fw_stats *stats);
  28000. ++
  28001. ++ struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
  28002. ++ struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
  28003. ++ struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
  28004. ++ u16 rd5g, u16 ctl2g, u16 ctl5g,
  28005. ++ enum wmi_dfs_region dfs_reg);
  28006. ++ struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
  28007. ++ u32 value);
  28008. ++ struct sk_buff *(*gen_init)(struct ath10k *ar);
  28009. ++ struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
  28010. ++ const struct wmi_start_scan_arg *arg);
  28011. ++ struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
  28012. ++ const struct wmi_stop_scan_arg *arg);
  28013. ++ struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
  28014. ++ enum wmi_vdev_type type,
  28015. ++ enum wmi_vdev_subtype subtype,
  28016. ++ const u8 macaddr[ETH_ALEN]);
  28017. ++ struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
  28018. ++ struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
  28019. ++ const struct wmi_vdev_start_request_arg *arg,
  28020. ++ bool restart);
  28021. ++ struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
  28022. ++ struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
  28023. ++ const u8 *bssid);
  28024. ++ struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
  28025. ++ struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
  28026. ++ u32 param_id, u32 param_value);
  28027. ++ struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
  28028. ++ const struct wmi_vdev_install_key_arg *arg);
  28029. ++ struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
  28030. ++ const struct wmi_vdev_spectral_conf_arg *arg);
  28031. ++ struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
  28032. ++ u32 trigger, u32 enable);
  28033. ++ struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
  28034. ++ const struct wmi_wmm_params_all_arg *arg);
  28035. ++ struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
  28036. ++ const u8 peer_addr[ETH_ALEN]);
  28037. ++ struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
  28038. ++ const u8 peer_addr[ETH_ALEN]);
  28039. ++ struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
  28040. ++ const u8 peer_addr[ETH_ALEN],
  28041. ++ u32 tid_bitmap);
  28042. ++ struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
  28043. ++ const u8 *peer_addr,
  28044. ++ enum wmi_peer_param param_id,
  28045. ++ u32 param_value);
  28046. ++ struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
  28047. ++ const struct wmi_peer_assoc_complete_arg *arg);
  28048. ++ struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
  28049. ++ enum wmi_sta_ps_mode psmode);
  28050. ++ struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
  28051. ++ enum wmi_sta_powersave_param param_id,
  28052. ++ u32 value);
  28053. ++ struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
  28054. ++ const u8 *mac,
  28055. ++ enum wmi_ap_ps_peer_param param_id,
  28056. ++ u32 value);
  28057. ++ struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
  28058. ++ const struct wmi_scan_chan_list_arg *arg);
  28059. ++ struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
  28060. ++ const void *bcn, size_t bcn_len,
  28061. ++ u32 bcn_paddr, bool dtim_zero,
  28062. ++ bool deliver_cab);
  28063. ++ struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
  28064. ++ const struct wmi_wmm_params_all_arg *arg);
  28065. ++ struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
  28066. ++ struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
  28067. ++ enum wmi_force_fw_hang_type type,
  28068. ++ u32 delay_ms);
  28069. ++ struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
  28070. ++ struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable,
  28071. ++ u32 log_level);
  28072. ++ struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
  28073. ++ struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
  28074. ++ struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
  28075. ++ u32 period, u32 duration,
  28076. ++ u32 next_offset,
  28077. ++ u32 enabled);
  28078. ++ struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
  28079. ++ struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
  28080. ++ const u8 *mac);
  28081. ++ struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
  28082. ++ const u8 *mac, u32 tid, u32 buf_size);
  28083. ++ struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
  28084. ++ const u8 *mac, u32 tid,
  28085. ++ u32 status);
  28086. ++ struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
  28087. ++ const u8 *mac, u32 tid, u32 initiator,
  28088. ++ u32 reason);
  28089. ++ struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
  28090. ++ u32 tim_ie_offset, struct sk_buff *bcn,
  28091. ++ u32 prb_caps, u32 prb_erp,
  28092. ++ void *prb_ies, size_t prb_ies_len);
  28093. ++ struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
  28094. ++ struct sk_buff *bcn);
  28095. ++ struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
  28096. ++ const u8 *p2p_ie);
  28097. ++ struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
  28098. ++ const u8 peer_addr[ETH_ALEN],
  28099. ++ const struct wmi_sta_uapsd_auto_trig_arg *args,
  28100. ++ u32 num_ac);
  28101. ++ struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
  28102. ++ const struct wmi_sta_keepalive_arg *arg);
  28103. ++};
  28104. ++
  28105. ++int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
  28106. ++
  28107. ++static inline int
  28108. ++ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
  28109. ++{
  28110. ++ if (WARN_ON_ONCE(!ar->wmi.ops->rx))
  28111. ++ return -EOPNOTSUPP;
  28112. ++
  28113. ++ ar->wmi.ops->rx(ar, skb);
  28114. ++ return 0;
  28115. ++}
  28116. ++
  28117. ++static inline int
  28118. ++ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
  28119. ++ size_t len)
  28120. ++{
  28121. ++ if (!ar->wmi.ops->map_svc)
  28122. ++ return -EOPNOTSUPP;
  28123. ++
  28124. ++ ar->wmi.ops->map_svc(in, out, len);
  28125. ++ return 0;
  28126. ++}
  28127. ++
  28128. ++static inline int
  28129. ++ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
  28130. ++ struct wmi_scan_ev_arg *arg)
  28131. ++{
  28132. ++ if (!ar->wmi.ops->pull_scan)
  28133. ++ return -EOPNOTSUPP;
  28134. ++
  28135. ++ return ar->wmi.ops->pull_scan(ar, skb, arg);
  28136. ++}
  28137. ++
  28138. ++static inline int
  28139. ++ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
  28140. ++ struct wmi_mgmt_rx_ev_arg *arg)
  28141. ++{
  28142. ++ if (!ar->wmi.ops->pull_mgmt_rx)
  28143. ++ return -EOPNOTSUPP;
  28144. ++
  28145. ++ return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
  28146. ++}
  28147. ++
  28148. ++static inline int
  28149. ++ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
  28150. ++ struct wmi_ch_info_ev_arg *arg)
  28151. ++{
  28152. ++ if (!ar->wmi.ops->pull_ch_info)
  28153. ++ return -EOPNOTSUPP;
  28154. ++
  28155. ++ return ar->wmi.ops->pull_ch_info(ar, skb, arg);
  28156. ++}
  28157. ++
  28158. ++static inline int
  28159. ++ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
  28160. ++ struct wmi_vdev_start_ev_arg *arg)
  28161. ++{
  28162. ++ if (!ar->wmi.ops->pull_vdev_start)
  28163. ++ return -EOPNOTSUPP;
  28164. ++
  28165. ++ return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
  28166. ++}
  28167. ++
  28168. ++static inline int
  28169. ++ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
  28170. ++ struct wmi_peer_kick_ev_arg *arg)
  28171. ++{
  28172. ++ if (!ar->wmi.ops->pull_peer_kick)
  28173. ++ return -EOPNOTSUPP;
  28174. ++
  28175. ++ return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
  28176. ++}
  28177. ++
  28178. ++static inline int
  28179. ++ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
  28180. ++ struct wmi_swba_ev_arg *arg)
  28181. ++{
  28182. ++ if (!ar->wmi.ops->pull_swba)
  28183. ++ return -EOPNOTSUPP;
  28184. ++
  28185. ++ return ar->wmi.ops->pull_swba(ar, skb, arg);
  28186. ++}
  28187. ++
  28188. ++static inline int
  28189. ++ath10k_wmi_pull_phyerr(struct ath10k *ar, struct sk_buff *skb,
  28190. ++ struct wmi_phyerr_ev_arg *arg)
  28191. ++{
  28192. ++ if (!ar->wmi.ops->pull_phyerr)
  28193. ++ return -EOPNOTSUPP;
  28194. ++
  28195. ++ return ar->wmi.ops->pull_phyerr(ar, skb, arg);
  28196. ++}
  28197. ++
  28198. ++static inline int
  28199. ++ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
  28200. ++ struct wmi_svc_rdy_ev_arg *arg)
  28201. ++{
  28202. ++ if (!ar->wmi.ops->pull_svc_rdy)
  28203. ++ return -EOPNOTSUPP;
  28204. ++
  28205. ++ return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
  28206. ++}
  28207. ++
  28208. ++static inline int
  28209. ++ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
  28210. ++ struct wmi_rdy_ev_arg *arg)
  28211. ++{
  28212. ++ if (!ar->wmi.ops->pull_rdy)
  28213. ++ return -EOPNOTSUPP;
  28214. ++
  28215. ++ return ar->wmi.ops->pull_rdy(ar, skb, arg);
  28216. ++}
  28217. ++
  28218. ++static inline int
  28219. ++ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
  28220. ++ struct ath10k_fw_stats *stats)
  28221. ++{
  28222. ++ if (!ar->wmi.ops->pull_fw_stats)
  28223. ++ return -EOPNOTSUPP;
  28224. ++
  28225. ++ return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
  28226. ++}
  28227. ++
  28228. ++static inline int
  28229. ++ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
  28230. ++{
  28231. ++ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
  28232. ++ struct sk_buff *skb;
  28233. ++ int ret;
  28234. ++
  28235. ++ if (!ar->wmi.ops->gen_mgmt_tx)
  28236. ++ return -EOPNOTSUPP;
  28237. ++
  28238. ++ skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
  28239. ++ if (IS_ERR(skb))
  28240. ++ return PTR_ERR(skb);
  28241. ++
  28242. ++ ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
  28243. ++ if (ret)
  28244. ++ return ret;
  28245. ++
  28246. ++ /* FIXME There's no ACK event for Management Tx. This probably
  28247. ++ * shouldn't be called here either. */
  28248. ++ info->flags |= IEEE80211_TX_STAT_ACK;
  28249. ++ ieee80211_tx_status_irqsafe(ar->hw, msdu);
  28250. ++
  28251. ++ return 0;
  28252. ++}
  28253. ++
  28254. ++static inline int
  28255. ++ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
  28256. ++ u16 ctl2g, u16 ctl5g,
  28257. ++ enum wmi_dfs_region dfs_reg)
  28258. ++{
  28259. ++ struct sk_buff *skb;
  28260. ++
  28261. ++ if (!ar->wmi.ops->gen_pdev_set_rd)
  28262. ++ return -EOPNOTSUPP;
  28263. ++
  28264. ++ skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
  28265. ++ dfs_reg);
  28266. ++ if (IS_ERR(skb))
  28267. ++ return PTR_ERR(skb);
  28268. ++
  28269. ++ return ath10k_wmi_cmd_send(ar, skb,
  28270. ++ ar->wmi.cmd->pdev_set_regdomain_cmdid);
  28271. ++}
  28272. ++
  28273. ++static inline int
  28274. ++ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
  28275. ++{
  28276. ++ struct sk_buff *skb;
  28277. ++
  28278. ++ if (!ar->wmi.ops->gen_pdev_suspend)
  28279. ++ return -EOPNOTSUPP;
  28280. ++
  28281. ++ skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
  28282. ++ if (IS_ERR(skb))
  28283. ++ return PTR_ERR(skb);
  28284. ++
  28285. ++ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
  28286. ++}
  28287. ++
  28288. ++static inline int
  28289. ++ath10k_wmi_pdev_resume_target(struct ath10k *ar)
  28290. ++{
  28291. ++ struct sk_buff *skb;
  28292. ++
  28293. ++ if (!ar->wmi.ops->gen_pdev_resume)
  28294. ++ return -EOPNOTSUPP;
  28295. ++
  28296. ++ skb = ar->wmi.ops->gen_pdev_resume(ar);
  28297. ++ if (IS_ERR(skb))
  28298. ++ return PTR_ERR(skb);
  28299. ++
  28300. ++ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
  28301. ++}
  28302. ++
  28303. ++static inline int
  28304. ++ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
  28305. ++{
  28306. ++ struct sk_buff *skb;
  28307. ++
  28308. ++ if (!ar->wmi.ops->gen_pdev_set_param)
  28309. ++ return -EOPNOTSUPP;
  28310. ++
  28311. ++ skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
  28312. ++ if (IS_ERR(skb))
  28313. ++ return PTR_ERR(skb);
  28314. ++
  28315. ++ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
  28316. ++}
  28317. ++
  28318. ++static inline int
  28319. ++ath10k_wmi_cmd_init(struct ath10k *ar)
  28320. ++{
  28321. ++ struct sk_buff *skb;
  28322. ++
  28323. ++ if (!ar->wmi.ops->gen_init)
  28324. ++ return -EOPNOTSUPP;
  28325. ++
  28326. ++ skb = ar->wmi.ops->gen_init(ar);
  28327. ++ if (IS_ERR(skb))
  28328. ++ return PTR_ERR(skb);
  28329. ++
  28330. ++ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
  28331. ++}
  28332. ++
  28333. ++static inline int
  28334. ++ath10k_wmi_start_scan(struct ath10k *ar,
  28335. ++ const struct wmi_start_scan_arg *arg)
  28336. ++{
  28337. ++ struct sk_buff *skb;
  28338. ++
  28339. ++ if (!ar->wmi.ops->gen_start_scan)
  28340. ++ return -EOPNOTSUPP;
  28341. ++
  28342. ++ skb = ar->wmi.ops->gen_start_scan(ar, arg);
  28343. ++ if (IS_ERR(skb))
  28344. ++ return PTR_ERR(skb);
  28345. ++
  28346. ++ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
  28347. ++}
  28348. ++
  28349. ++static inline int
  28350. ++ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
  28351. ++{
  28352. ++ struct sk_buff *skb;
  28353. ++
  28354. ++ if (!ar->wmi.ops->gen_stop_scan)
  28355. ++ return -EOPNOTSUPP;
  28356. ++
  28357. ++ skb = ar->wmi.ops->gen_stop_scan(ar, arg);
  28358. ++ if (IS_ERR(skb))
  28359. ++ return PTR_ERR(skb);
  28360. ++
  28361. ++ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
  28362. ++}
  28363. ++
  28364. ++static inline int
  28365. ++ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
  28366. ++ enum wmi_vdev_type type,
  28367. ++ enum wmi_vdev_subtype subtype,
  28368. ++ const u8 macaddr[ETH_ALEN])
  28369. ++{
  28370. ++ struct sk_buff *skb;
  28371. ++
  28372. ++ if (!ar->wmi.ops->gen_vdev_create)
  28373. ++ return -EOPNOTSUPP;
  28374. ++
  28375. ++ skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
  28376. ++ if (IS_ERR(skb))
  28377. ++ return PTR_ERR(skb);
  28378. ++
  28379. ++ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
  28380. ++}
  28381. ++
  28382. ++static inline int
  28383. ++ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
  28384. ++{
  28385. ++ struct sk_buff *skb;
  28386. ++
  28387. ++ if (!ar->wmi.ops->gen_vdev_delete)
  28388. ++ return -EOPNOTSUPP;
  28389. ++
  28390. ++ skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
  28391. ++ if (IS_ERR(skb))
  28392. ++ return PTR_ERR(skb);
  28393. ++
  28394. ++ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
  28395. ++}
  28396. ++
  28397. ++static inline int
  28398. ++ath10k_wmi_vdev_start(struct ath10k *ar,
  28399. ++ const struct wmi_vdev_start_request_arg *arg)
  28400. ++{
  28401. ++ struct sk_buff *skb;
  28402. ++
  28403. ++ if (!ar->wmi.ops->gen_vdev_start)
  28404. ++ return -EOPNOTSUPP;
  28405. ++
  28406. ++ skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
  28407. ++ if (IS_ERR(skb))
  28408. ++ return PTR_ERR(skb);
  28409. ++
  28410. ++ return ath10k_wmi_cmd_send(ar, skb,
  28411. ++ ar->wmi.cmd->vdev_start_request_cmdid);
  28412. ++}
  28413. ++
  28414. ++static inline int
  28415. ++ath10k_wmi_vdev_restart(struct ath10k *ar,
  28416. ++ const struct wmi_vdev_start_request_arg *arg)
  28417. ++{
  28418. ++ struct sk_buff *skb;
  28419. ++
  28420. ++ if (!ar->wmi.ops->gen_vdev_start)
  28421. ++ return -EOPNOTSUPP;
  28422. ++
  28423. ++ skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
  28424. ++ if (IS_ERR(skb))
  28425. ++ return PTR_ERR(skb);
  28426. ++
  28427. ++ return ath10k_wmi_cmd_send(ar, skb,
  28428. ++ ar->wmi.cmd->vdev_restart_request_cmdid);
  28429. ++}
  28430. ++
  28431. ++static inline int
  28432. ++ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
  28433. ++{
  28434. ++ struct sk_buff *skb;
  28435. ++
  28436. ++ if (!ar->wmi.ops->gen_vdev_stop)
  28437. ++ return -EOPNOTSUPP;
  28438. ++
  28439. ++ skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
  28440. ++ if (IS_ERR(skb))
  28441. ++ return PTR_ERR(skb);
  28442. ++
  28443. ++ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
  28444. ++}
  28445. ++
  28446. ++static inline int
  28447. ++ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
  28448. ++{
  28449. ++ struct sk_buff *skb;
  28450. ++
  28451. ++ if (!ar->wmi.ops->gen_vdev_up)
  28452. ++ return -EOPNOTSUPP;
  28453. ++
  28454. ++ skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
  28455. ++ if (IS_ERR(skb))
  28456. ++ return PTR_ERR(skb);
  28457. ++
  28458. ++ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
  28459. ++}
  28460. ++
  28461. ++static inline int
  28462. ++ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
  28463. ++{
  28464. ++ struct sk_buff *skb;
  28465. ++
  28466. ++ if (!ar->wmi.ops->gen_vdev_down)
  28467. ++ return -EOPNOTSUPP;
  28468. ++
  28469. ++ skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
  28470. ++ if (IS_ERR(skb))
  28471. ++ return PTR_ERR(skb);
  28472. ++
  28473. ++ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
  28474. ++}
  28475. ++
  28476. ++static inline int
  28477. ++ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
  28478. ++ u32 param_value)
  28479. ++{
  28480. ++ struct sk_buff *skb;
  28481. ++
  28482. ++ if (!ar->wmi.ops->gen_vdev_set_param)
  28483. ++ return -EOPNOTSUPP;
  28484. ++
  28485. ++ skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
  28486. ++ param_value);
  28487. ++ if (IS_ERR(skb))
  28488. ++ return PTR_ERR(skb);
  28489. ++
  28490. ++ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
  28491. ++}
  28492. ++
  28493. ++static inline int
  28494. ++ath10k_wmi_vdev_install_key(struct ath10k *ar,
  28495. ++ const struct wmi_vdev_install_key_arg *arg)
  28496. ++{
  28497. ++ struct sk_buff *skb;
  28498. ++
  28499. ++ if (!ar->wmi.ops->gen_vdev_install_key)
  28500. ++ return -EOPNOTSUPP;
  28501. ++
  28502. ++ skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
  28503. ++ if (IS_ERR(skb))
  28504. ++ return PTR_ERR(skb);
  28505. ++
  28506. ++ return ath10k_wmi_cmd_send(ar, skb,
  28507. ++ ar->wmi.cmd->vdev_install_key_cmdid);
  28508. ++}
  28509. ++
  28510. ++static inline int
  28511. ++ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
  28512. ++ const struct wmi_vdev_spectral_conf_arg *arg)
  28513. ++{
  28514. ++ struct sk_buff *skb;
  28515. ++ u32 cmd_id;
  28516. ++
  28517. ++ skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
  28518. ++ if (IS_ERR(skb))
  28519. ++ return PTR_ERR(skb);
  28520. ++
  28521. ++ cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
  28522. ++ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
  28523. ++}
  28524. ++
  28525. ++static inline int
  28526. ++ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
  28527. ++ u32 enable)
  28528. ++{
  28529. ++ struct sk_buff *skb;
  28530. ++ u32 cmd_id;
  28531. ++
  28532. ++ skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
  28533. ++ enable);
  28534. ++ if (IS_ERR(skb))
  28535. ++ return PTR_ERR(skb);
  28536. ++
  28537. ++ cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
  28538. ++ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
  28539. ++}
  28540. ++
  28541. ++static inline int
  28542. ++ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
  28543. ++ const u8 peer_addr[ETH_ALEN],
  28544. ++ const struct wmi_sta_uapsd_auto_trig_arg *args,
  28545. ++ u32 num_ac)
  28546. ++{
  28547. ++ struct sk_buff *skb;
  28548. ++ u32 cmd_id;
  28549. ++
  28550. ++ if (!ar->wmi.ops->gen_vdev_sta_uapsd)
  28551. ++ return -EOPNOTSUPP;
  28552. ++
  28553. ++ skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
  28554. ++ num_ac);
  28555. ++ if (IS_ERR(skb))
  28556. ++ return PTR_ERR(skb);
  28557. ++
  28558. ++ cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
  28559. ++ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
  28560. ++}
  28561. ++
  28562. ++static inline int
  28563. ++ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
  28564. ++ const struct wmi_wmm_params_all_arg *arg)
  28565. ++{
  28566. ++ struct sk_buff *skb;
  28567. ++ u32 cmd_id;
  28568. ++
  28569. ++ skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
  28570. ++ if (IS_ERR(skb))
  28571. ++ return PTR_ERR(skb);
  28572. ++
  28573. ++ cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
  28574. ++ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
  28575. ++}
  28576. ++
  28577. ++static inline int
  28578. ++ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
  28579. ++ const u8 peer_addr[ETH_ALEN])
  28580. ++{
  28581. ++ struct sk_buff *skb;
  28582. ++
  28583. ++ if (!ar->wmi.ops->gen_peer_create)
  28584. ++ return -EOPNOTSUPP;
  28585. ++
  28586. ++ skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr);
  28587. ++ if (IS_ERR(skb))
  28588. ++ return PTR_ERR(skb);
  28589. ++
  28590. ++ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
  28591. ++}
  28592. ++
  28593. ++static inline int
  28594. ++ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
  28595. ++ const u8 peer_addr[ETH_ALEN])
  28596. ++{
  28597. ++ struct sk_buff *skb;
  28598. ++
  28599. ++ if (!ar->wmi.ops->gen_peer_delete)
  28600. ++ return -EOPNOTSUPP;
  28601. ++
  28602. ++ skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
  28603. ++ if (IS_ERR(skb))
  28604. ++ return PTR_ERR(skb);
  28605. ++
  28606. ++ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
  28607. ++}
  28608. ++
  28609. ++static inline int
  28610. ++ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
  28611. ++ const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
  28612. ++{
  28613. ++ struct sk_buff *skb;
  28614. ++
  28615. ++ if (!ar->wmi.ops->gen_peer_flush)
  28616. ++ return -EOPNOTSUPP;
  28617. ++
  28618. ++ skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
  28619. ++ if (IS_ERR(skb))
  28620. ++ return PTR_ERR(skb);
  28621. ++
  28622. ++ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
  28623. ++}
  28624. ++
  28625. ++static inline int
  28626. ++ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
  28627. ++ enum wmi_peer_param param_id, u32 param_value)
  28628. ++{
  28629. ++ struct sk_buff *skb;
  28630. ++
  28631. ++ if (!ar->wmi.ops->gen_peer_set_param)
  28632. ++ return -EOPNOTSUPP;
  28633. ++
  28634. ++ skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
  28635. ++ param_value);
  28636. ++ if (IS_ERR(skb))
  28637. ++ return PTR_ERR(skb);
  28638. ++
  28639. ++ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
  28640. ++}
  28641. ++
  28642. ++static inline int
  28643. ++ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
  28644. ++ enum wmi_sta_ps_mode psmode)
  28645. ++{
  28646. ++ struct sk_buff *skb;
  28647. ++
  28648. ++ if (!ar->wmi.ops->gen_set_psmode)
  28649. ++ return -EOPNOTSUPP;
  28650. ++
  28651. ++ skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
  28652. ++ if (IS_ERR(skb))
  28653. ++ return PTR_ERR(skb);
  28654. ++
  28655. ++ return ath10k_wmi_cmd_send(ar, skb,
  28656. ++ ar->wmi.cmd->sta_powersave_mode_cmdid);
  28657. ++}
  28658. ++
  28659. ++static inline int
  28660. ++ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
  28661. ++ enum wmi_sta_powersave_param param_id, u32 value)
  28662. ++{
  28663. ++ struct sk_buff *skb;
  28664. ++
  28665. ++ if (!ar->wmi.ops->gen_set_sta_ps)
  28666. ++ return -EOPNOTSUPP;
  28667. ++
  28668. ++ skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
  28669. ++ if (IS_ERR(skb))
  28670. ++ return PTR_ERR(skb);
  28671. ++
  28672. ++ return ath10k_wmi_cmd_send(ar, skb,
  28673. ++ ar->wmi.cmd->sta_powersave_param_cmdid);
  28674. ++}
  28675. ++
  28676. ++static inline int
  28677. ++ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
  28678. ++ enum wmi_ap_ps_peer_param param_id, u32 value)
  28679. ++{
  28680. ++ struct sk_buff *skb;
  28681. ++
  28682. ++ if (!ar->wmi.ops->gen_set_ap_ps)
  28683. ++ return -EOPNOTSUPP;
  28684. ++
  28685. ++ skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
  28686. ++ if (IS_ERR(skb))
  28687. ++ return PTR_ERR(skb);
  28688. ++
  28689. ++ return ath10k_wmi_cmd_send(ar, skb,
  28690. ++ ar->wmi.cmd->ap_ps_peer_param_cmdid);
  28691. ++}
  28692. ++
  28693. ++static inline int
  28694. ++ath10k_wmi_scan_chan_list(struct ath10k *ar,
  28695. ++ const struct wmi_scan_chan_list_arg *arg)
  28696. ++{
  28697. ++ struct sk_buff *skb;
  28698. ++
  28699. ++ if (!ar->wmi.ops->gen_scan_chan_list)
  28700. ++ return -EOPNOTSUPP;
  28701. ++
  28702. ++ skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
  28703. ++ if (IS_ERR(skb))
  28704. ++ return PTR_ERR(skb);
  28705. ++
  28706. ++ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
  28707. ++}
  28708. ++
  28709. ++static inline int
  28710. ++ath10k_wmi_peer_assoc(struct ath10k *ar,
  28711. ++ const struct wmi_peer_assoc_complete_arg *arg)
  28712. ++{
  28713. ++ struct sk_buff *skb;
  28714. ++
  28715. ++ if (!ar->wmi.ops->gen_peer_assoc)
  28716. ++ return -EOPNOTSUPP;
  28717. ++
  28718. ++ skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
  28719. ++ if (IS_ERR(skb))
  28720. ++ return PTR_ERR(skb);
  28721. ++
  28722. ++ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
  28723. ++}
  28724. ++
  28725. ++static inline int
  28726. ++ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
  28727. ++ const void *bcn, size_t bcn_len,
  28728. ++ u32 bcn_paddr, bool dtim_zero,
  28729. ++ bool deliver_cab)
  28730. ++{
  28731. ++ struct sk_buff *skb;
  28732. ++ int ret;
  28733. ++
  28734. ++ if (!ar->wmi.ops->gen_beacon_dma)
  28735. ++ return -EOPNOTSUPP;
  28736. ++
  28737. ++ skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
  28738. ++ dtim_zero, deliver_cab);
  28739. ++ if (IS_ERR(skb))
  28740. ++ return PTR_ERR(skb);
  28741. ++
  28742. ++ ret = ath10k_wmi_cmd_send_nowait(ar, skb,
  28743. ++ ar->wmi.cmd->pdev_send_bcn_cmdid);
  28744. ++ if (ret) {
  28745. ++ dev_kfree_skb(skb);
  28746. ++ return ret;
  28747. ++ }
  28748. ++
  28749. ++ return 0;
  28750. ++}
  28751. ++
  28752. ++static inline int
  28753. ++ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
  28754. ++ const struct wmi_wmm_params_all_arg *arg)
  28755. ++{
  28756. ++ struct sk_buff *skb;
  28757. ++
  28758. ++ if (!ar->wmi.ops->gen_pdev_set_wmm)
  28759. ++ return -EOPNOTSUPP;
  28760. ++
  28761. ++ skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
  28762. ++ if (IS_ERR(skb))
  28763. ++ return PTR_ERR(skb);
  28764. ++
  28765. ++ return ath10k_wmi_cmd_send(ar, skb,
  28766. ++ ar->wmi.cmd->pdev_set_wmm_params_cmdid);
  28767. ++}
  28768. ++
  28769. ++static inline int
  28770. ++ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
  28771. ++{
  28772. ++ struct sk_buff *skb;
  28773. ++
  28774. ++ if (!ar->wmi.ops->gen_request_stats)
  28775. ++ return -EOPNOTSUPP;
  28776. ++
  28777. ++ skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
  28778. ++ if (IS_ERR(skb))
  28779. ++ return PTR_ERR(skb);
  28780. ++
  28781. ++ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
  28782. ++}
  28783. ++
  28784. ++static inline int
  28785. ++ath10k_wmi_force_fw_hang(struct ath10k *ar,
  28786. ++ enum wmi_force_fw_hang_type type, u32 delay_ms)
  28787. ++{
  28788. ++ struct sk_buff *skb;
  28789. ++
  28790. ++ if (!ar->wmi.ops->gen_force_fw_hang)
  28791. ++ return -EOPNOTSUPP;
  28792. ++
  28793. ++ skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
  28794. ++ if (IS_ERR(skb))
  28795. ++ return PTR_ERR(skb);
  28796. ++
  28797. ++ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
  28798. ++}
  28799. ++
  28800. ++static inline int
  28801. ++ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable, u32 log_level)
  28802. ++{
  28803. ++ struct sk_buff *skb;
  28804. ++
  28805. ++ if (!ar->wmi.ops->gen_dbglog_cfg)
  28806. ++ return -EOPNOTSUPP;
  28807. ++
  28808. ++ skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
  28809. ++ if (IS_ERR(skb))
  28810. ++ return PTR_ERR(skb);
  28811. ++
  28812. ++ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
  28813. ++}
  28814. ++
  28815. ++static inline int
  28816. ++ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
  28817. ++{
  28818. ++ struct sk_buff *skb;
  28819. ++
  28820. ++ if (!ar->wmi.ops->gen_pktlog_enable)
  28821. ++ return -EOPNOTSUPP;
  28822. ++
  28823. ++ skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
  28824. ++ if (IS_ERR(skb))
  28825. ++ return PTR_ERR(skb);
  28826. ++
  28827. ++ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
  28828. ++}
  28829. ++
  28830. ++static inline int
  28831. ++ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
  28832. ++{
  28833. ++ struct sk_buff *skb;
  28834. ++
  28835. ++ if (!ar->wmi.ops->gen_pktlog_disable)
  28836. ++ return -EOPNOTSUPP;
  28837. ++
  28838. ++ skb = ar->wmi.ops->gen_pktlog_disable(ar);
  28839. ++ if (IS_ERR(skb))
  28840. ++ return PTR_ERR(skb);
  28841. ++
  28842. ++ return ath10k_wmi_cmd_send(ar, skb,
  28843. ++ ar->wmi.cmd->pdev_pktlog_disable_cmdid);
  28844. ++}
  28845. ++
  28846. ++static inline int
  28847. ++ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
  28848. ++ u32 next_offset, u32 enabled)
  28849. ++{
  28850. ++ struct sk_buff *skb;
  28851. ++
  28852. ++ if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
  28853. ++ return -EOPNOTSUPP;
  28854. ++
  28855. ++ skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
  28856. ++ next_offset, enabled);
  28857. ++ if (IS_ERR(skb))
  28858. ++ return PTR_ERR(skb);
  28859. ++
  28860. ++ return ath10k_wmi_cmd_send(ar, skb,
  28861. ++ ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
  28862. ++}
  28863. ++
  28864. ++static inline int
  28865. ++ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
  28866. ++{
  28867. ++ struct sk_buff *skb;
  28868. ++
  28869. ++ if (!ar->wmi.ops->gen_pdev_get_temperature)
  28870. ++ return -EOPNOTSUPP;
  28871. ++
  28872. ++ skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
  28873. ++ if (IS_ERR(skb))
  28874. ++ return PTR_ERR(skb);
  28875. ++
  28876. ++ return ath10k_wmi_cmd_send(ar, skb,
  28877. ++ ar->wmi.cmd->pdev_get_temperature_cmdid);
  28878. ++}
  28879. ++
  28880. ++static inline int
  28881. ++ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
  28882. ++{
  28883. ++ struct sk_buff *skb;
  28884. ++
  28885. ++ if (!ar->wmi.ops->gen_addba_clear_resp)
  28886. ++ return -EOPNOTSUPP;
  28887. ++
  28888. ++ skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
  28889. ++ if (IS_ERR(skb))
  28890. ++ return PTR_ERR(skb);
  28891. ++
  28892. ++ return ath10k_wmi_cmd_send(ar, skb,
  28893. ++ ar->wmi.cmd->addba_clear_resp_cmdid);
  28894. ++}
  28895. ++
  28896. ++static inline int
  28897. ++ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
  28898. ++ u32 tid, u32 buf_size)
  28899. ++{
  28900. ++ struct sk_buff *skb;
  28901. ++
  28902. ++ if (!ar->wmi.ops->gen_addba_send)
  28903. ++ return -EOPNOTSUPP;
  28904. ++
  28905. ++ skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
  28906. ++ if (IS_ERR(skb))
  28907. ++ return PTR_ERR(skb);
  28908. ++
  28909. ++ return ath10k_wmi_cmd_send(ar, skb,
  28910. ++ ar->wmi.cmd->addba_send_cmdid);
  28911. ++}
  28912. ++
  28913. ++static inline int
  28914. ++ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
  28915. ++ u32 tid, u32 status)
  28916. ++{
  28917. ++ struct sk_buff *skb;
  28918. ++
  28919. ++ if (!ar->wmi.ops->gen_addba_set_resp)
  28920. ++ return -EOPNOTSUPP;
  28921. ++
  28922. ++ skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
  28923. ++ if (IS_ERR(skb))
  28924. ++ return PTR_ERR(skb);
  28925. ++
  28926. ++ return ath10k_wmi_cmd_send(ar, skb,
  28927. ++ ar->wmi.cmd->addba_set_resp_cmdid);
  28928. ++}
  28929. ++
  28930. ++static inline int
  28931. ++ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
  28932. ++ u32 tid, u32 initiator, u32 reason)
  28933. ++{
  28934. ++ struct sk_buff *skb;
  28935. ++
  28936. ++ if (!ar->wmi.ops->gen_delba_send)
  28937. ++ return -EOPNOTSUPP;
  28938. ++
  28939. ++ skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
  28940. ++ reason);
  28941. ++ if (IS_ERR(skb))
  28942. ++ return PTR_ERR(skb);
  28943. ++
  28944. ++ return ath10k_wmi_cmd_send(ar, skb,
  28945. ++ ar->wmi.cmd->delba_send_cmdid);
  28946. ++}
  28947. ++
  28948. ++static inline int
  28949. ++ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
  28950. ++ struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
  28951. ++ void *prb_ies, size_t prb_ies_len)
  28952. ++{
  28953. ++ struct sk_buff *skb;
  28954. ++
  28955. ++ if (!ar->wmi.ops->gen_bcn_tmpl)
  28956. ++ return -EOPNOTSUPP;
  28957. ++
  28958. ++ skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
  28959. ++ prb_caps, prb_erp, prb_ies,
  28960. ++ prb_ies_len);
  28961. ++ if (IS_ERR(skb))
  28962. ++ return PTR_ERR(skb);
  28963. ++
  28964. ++ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
  28965. ++}
  28966. ++
  28967. ++static inline int
  28968. ++ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
  28969. ++{
  28970. ++ struct sk_buff *skb;
  28971. ++
  28972. ++ if (!ar->wmi.ops->gen_prb_tmpl)
  28973. ++ return -EOPNOTSUPP;
  28974. ++
  28975. ++ skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
  28976. ++ if (IS_ERR(skb))
  28977. ++ return PTR_ERR(skb);
  28978. ++
  28979. ++ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
  28980. ++}
  28981. ++
  28982. ++static inline int
  28983. ++ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
  28984. ++{
  28985. ++ struct sk_buff *skb;
  28986. ++
  28987. ++ if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
  28988. ++ return -EOPNOTSUPP;
  28989. ++
  28990. ++ skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
  28991. ++ if (IS_ERR(skb))
  28992. ++ return PTR_ERR(skb);
  28993. ++
  28994. ++ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
  28995. ++}
  28996. ++
  28997. ++static inline int
  28998. ++ath10k_wmi_sta_keepalive(struct ath10k *ar,
  28999. ++ const struct wmi_sta_keepalive_arg *arg)
  29000. ++{
  29001. ++ struct sk_buff *skb;
  29002. ++ u32 cmd_id;
  29003. ++
  29004. ++ if (!ar->wmi.ops->gen_sta_keepalive)
  29005. ++ return -EOPNOTSUPP;
  29006. ++
  29007. ++ skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
  29008. ++ if (IS_ERR(skb))
  29009. ++ return PTR_ERR(skb);
  29010. ++
  29011. ++ cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
  29012. ++ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
  29013. ++}
  29014. ++
  29015. ++#endif
  29016. +--- /dev/null
  29017. ++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
  29018. +@@ -0,0 +1,2796 @@
  29019. ++/*
  29020. ++ * Copyright (c) 2005-2011 Atheros Communications Inc.
  29021. ++ * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
  29022. ++ *
  29023. ++ * Permission to use, copy, modify, and/or distribute this software for any
  29024. ++ * purpose with or without fee is hereby granted, provided that the above
  29025. ++ * copyright notice and this permission notice appear in all copies.
  29026. ++ *
  29027. ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  29028. ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  29029. ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  29030. ++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  29031. ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  29032. ++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  29033. ++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  29034. ++ */
  29035. ++#include "core.h"
  29036. ++#include "debug.h"
  29037. ++#include "hw.h"
  29038. ++#include "wmi.h"
  29039. ++#include "wmi-ops.h"
  29040. ++#include "wmi-tlv.h"
  29041. ++
  29042. ++/***************/
  29043. ++/* TLV helpers */
  29044. ++/**************/
  29045. ++
  29046. ++struct wmi_tlv_policy {
  29047. ++ size_t min_len;
  29048. ++};
  29049. ++
  29050. ++static const struct wmi_tlv_policy wmi_tlv_policies[] = {
  29051. ++ [WMI_TLV_TAG_ARRAY_BYTE]
  29052. ++ = { .min_len = sizeof(u8) },
  29053. ++ [WMI_TLV_TAG_ARRAY_UINT32]
  29054. ++ = { .min_len = sizeof(u32) },
  29055. ++ [WMI_TLV_TAG_STRUCT_SCAN_EVENT]
  29056. ++ = { .min_len = sizeof(struct wmi_scan_event) },
  29057. ++ [WMI_TLV_TAG_STRUCT_MGMT_RX_HDR]
  29058. ++ = { .min_len = sizeof(struct wmi_tlv_mgmt_rx_ev) },
  29059. ++ [WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT]
  29060. ++ = { .min_len = sizeof(struct wmi_chan_info_event) },
  29061. ++ [WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT]
  29062. ++ = { .min_len = sizeof(struct wmi_vdev_start_response_event) },
  29063. ++ [WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT]
  29064. ++ = { .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
  29065. ++ [WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT]
  29066. ++ = { .min_len = sizeof(struct wmi_host_swba_event) },
  29067. ++ [WMI_TLV_TAG_STRUCT_TIM_INFO]
  29068. ++ = { .min_len = sizeof(struct wmi_tim_info) },
  29069. ++ [WMI_TLV_TAG_STRUCT_P2P_NOA_INFO]
  29070. ++ = { .min_len = sizeof(struct wmi_p2p_noa_info) },
  29071. ++ [WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT]
  29072. ++ = { .min_len = sizeof(struct wmi_tlv_svc_rdy_ev) },
  29073. ++ [WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES]
  29074. ++ = { .min_len = sizeof(struct hal_reg_capabilities) },
  29075. ++ [WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ]
  29076. ++ = { .min_len = sizeof(struct wlan_host_mem_req) },
  29077. ++ [WMI_TLV_TAG_STRUCT_READY_EVENT]
  29078. ++ = { .min_len = sizeof(struct wmi_tlv_rdy_ev) },
  29079. ++ [WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT]
  29080. ++ = { .min_len = sizeof(struct wmi_tlv_bcn_tx_status_ev) },
  29081. ++ [WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT]
  29082. ++ = { .min_len = sizeof(struct wmi_tlv_diag_data_ev) },
  29083. ++};
  29084. ++
  29085. ++static int
  29086. ++ath10k_wmi_tlv_iter(struct ath10k *ar, const void *ptr, size_t len,
  29087. ++ int (*iter)(struct ath10k *ar, u16 tag, u16 len,
  29088. ++ const void *ptr, void *data),
  29089. ++ void *data)
  29090. ++{
  29091. ++ const void *begin = ptr;
  29092. ++ const struct wmi_tlv *tlv;
  29093. ++ u16 tlv_tag, tlv_len;
  29094. ++ int ret;
  29095. ++
  29096. ++ while (len > 0) {
  29097. ++ if (len < sizeof(*tlv)) {
  29098. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  29099. ++ "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
  29100. ++ ptr - begin, len, sizeof(*tlv));
  29101. ++ return -EINVAL;
  29102. ++ }
  29103. ++
  29104. ++ tlv = ptr;
  29105. ++ tlv_tag = __le16_to_cpu(tlv->tag);
  29106. ++ tlv_len = __le16_to_cpu(tlv->len);
  29107. ++ ptr += sizeof(*tlv);
  29108. ++ len -= sizeof(*tlv);
  29109. ++
  29110. ++ if (tlv_len > len) {
  29111. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  29112. ++ "wmi tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected)\n",
  29113. ++ tlv_tag, ptr - begin, len, tlv_len);
  29114. ++ return -EINVAL;
  29115. ++ }
  29116. ++
  29117. ++ if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) &&
  29118. ++ wmi_tlv_policies[tlv_tag].min_len &&
  29119. ++ wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
  29120. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  29121. ++ "wmi tlv parse failure of tag %hhu at byte %zd (%hhu bytes is less than min length %zu)\n",
  29122. ++ tlv_tag, ptr - begin, tlv_len,
  29123. ++ wmi_tlv_policies[tlv_tag].min_len);
  29124. ++ return -EINVAL;
  29125. ++ }
  29126. ++
  29127. ++ ret = iter(ar, tlv_tag, tlv_len, ptr, data);
  29128. ++ if (ret)
  29129. ++ return ret;
  29130. ++
  29131. ++ ptr += tlv_len;
  29132. ++ len -= tlv_len;
  29133. ++ }
  29134. ++
  29135. ++ return 0;
  29136. ++}
  29137. ++
  29138. ++static int ath10k_wmi_tlv_iter_parse(struct ath10k *ar, u16 tag, u16 len,
  29139. ++ const void *ptr, void *data)
  29140. ++{
  29141. ++ const void **tb = data;
  29142. ++
  29143. ++ if (tag < WMI_TLV_TAG_MAX)
  29144. ++ tb[tag] = ptr;
  29145. ++
  29146. ++ return 0;
  29147. ++}
  29148. ++
  29149. ++static int ath10k_wmi_tlv_parse(struct ath10k *ar, const void **tb,
  29150. ++ const void *ptr, size_t len)
  29151. ++{
  29152. ++ return ath10k_wmi_tlv_iter(ar, ptr, len, ath10k_wmi_tlv_iter_parse,
  29153. ++ (void *)tb);
  29154. ++}
  29155. ++
  29156. ++static const void **
  29157. ++ath10k_wmi_tlv_parse_alloc(struct ath10k *ar, const void *ptr,
  29158. ++ size_t len, gfp_t gfp)
  29159. ++{
  29160. ++ const void **tb;
  29161. ++ int ret;
  29162. ++
  29163. ++ tb = kzalloc(sizeof(*tb) * WMI_TLV_TAG_MAX, gfp);
  29164. ++ if (!tb)
  29165. ++ return ERR_PTR(-ENOMEM);
  29166. ++
  29167. ++ ret = ath10k_wmi_tlv_parse(ar, tb, ptr, len);
  29168. ++ if (ret) {
  29169. ++ kfree(tb);
  29170. ++ return ERR_PTR(ret);
  29171. ++ }
  29172. ++
  29173. ++ return tb;
  29174. ++}
  29175. ++
  29176. ++static u16 ath10k_wmi_tlv_len(const void *ptr)
  29177. ++{
  29178. ++ return __le16_to_cpu((((const struct wmi_tlv *)ptr) - 1)->len);
  29179. ++}
  29180. ++
  29181. ++/**************/
  29182. ++/* TLV events */
  29183. ++/**************/
  29184. ++static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
  29185. ++ struct sk_buff *skb)
  29186. ++{
  29187. ++ const void **tb;
  29188. ++ const struct wmi_tlv_bcn_tx_status_ev *ev;
  29189. ++ u32 vdev_id, tx_status;
  29190. ++ int ret;
  29191. ++
  29192. ++ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
  29193. ++ if (IS_ERR(tb)) {
  29194. ++ ret = PTR_ERR(tb);
  29195. ++ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
  29196. ++ return ret;
  29197. ++ }
  29198. ++
  29199. ++ ev = tb[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT];
  29200. ++ if (!ev) {
  29201. ++ kfree(tb);
  29202. ++ return -EPROTO;
  29203. ++ }
  29204. ++
  29205. ++ tx_status = __le32_to_cpu(ev->tx_status);
  29206. ++ vdev_id = __le32_to_cpu(ev->vdev_id);
  29207. ++
  29208. ++ switch (tx_status) {
  29209. ++ case WMI_TLV_BCN_TX_STATUS_OK:
  29210. ++ break;
  29211. ++ case WMI_TLV_BCN_TX_STATUS_XRETRY:
  29212. ++ case WMI_TLV_BCN_TX_STATUS_DROP:
  29213. ++ case WMI_TLV_BCN_TX_STATUS_FILTERED:
  29214. ++ /* FIXME: It's probably worth telling mac80211 to stop the
  29215. ++ * interface as it is crippled.
  29216. ++ */
  29217. ++ ath10k_warn(ar, "received bcn tmpl tx status on vdev %i: %d",
  29218. ++ vdev_id, tx_status);
  29219. ++ break;
  29220. ++ }
  29221. ++
  29222. ++ kfree(tb);
  29223. ++ return 0;
  29224. ++}
  29225. ++
  29226. ++static int ath10k_wmi_tlv_event_diag_data(struct ath10k *ar,
  29227. ++ struct sk_buff *skb)
  29228. ++{
  29229. ++ const void **tb;
  29230. ++ const struct wmi_tlv_diag_data_ev *ev;
  29231. ++ const struct wmi_tlv_diag_item *item;
  29232. ++ const void *data;
  29233. ++ int ret, num_items, len;
  29234. ++
  29235. ++ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
  29236. ++ if (IS_ERR(tb)) {
  29237. ++ ret = PTR_ERR(tb);
  29238. ++ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
  29239. ++ return ret;
  29240. ++ }
  29241. ++
  29242. ++ ev = tb[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT];
  29243. ++ data = tb[WMI_TLV_TAG_ARRAY_BYTE];
  29244. ++ if (!ev || !data) {
  29245. ++ kfree(tb);
  29246. ++ return -EPROTO;
  29247. ++ }
  29248. ++
  29249. ++ num_items = __le32_to_cpu(ev->num_items);
  29250. ++ len = ath10k_wmi_tlv_len(data);
  29251. ++
  29252. ++ while (num_items--) {
  29253. ++ if (len == 0)
  29254. ++ break;
  29255. ++ if (len < sizeof(*item)) {
  29256. ++ ath10k_warn(ar, "failed to parse diag data: can't fit item header\n");
  29257. ++ break;
  29258. ++ }
  29259. ++
  29260. ++ item = data;
  29261. ++
  29262. ++ if (len < sizeof(*item) + __le16_to_cpu(item->len)) {
  29263. ++ ath10k_warn(ar, "failed to parse diag data: item is too long\n");
  29264. ++ break;
  29265. ++ }
  29266. ++
  29267. ++ trace_ath10k_wmi_diag_container(ar,
  29268. ++ item->type,
  29269. ++ __le32_to_cpu(item->timestamp),
  29270. ++ __le32_to_cpu(item->code),
  29271. ++ __le16_to_cpu(item->len),
  29272. ++ item->payload);
  29273. ++
  29274. ++ len -= sizeof(*item);
  29275. ++ len -= roundup(__le16_to_cpu(item->len), 4);
  29276. ++
  29277. ++ data += sizeof(*item);
  29278. ++ data += roundup(__le16_to_cpu(item->len), 4);
  29279. ++ }
  29280. ++
  29281. ++ if (num_items != -1 || len != 0)
  29282. ++ ath10k_warn(ar, "failed to parse diag data event: num_items %d len %d\n",
  29283. ++ num_items, len);
  29284. ++
  29285. ++ kfree(tb);
  29286. ++ return 0;
  29287. ++}
  29288. ++
  29289. ++static int ath10k_wmi_tlv_event_diag(struct ath10k *ar,
  29290. ++ struct sk_buff *skb)
  29291. ++{
  29292. ++ const void **tb;
  29293. ++ const void *data;
  29294. ++ int ret, len;
  29295. ++
  29296. ++ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
  29297. ++ if (IS_ERR(tb)) {
  29298. ++ ret = PTR_ERR(tb);
  29299. ++ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
  29300. ++ return ret;
  29301. ++ }
  29302. ++
  29303. ++ data = tb[WMI_TLV_TAG_ARRAY_BYTE];
  29304. ++ if (!data) {
  29305. ++ kfree(tb);
  29306. ++ return -EPROTO;
  29307. ++ }
  29308. ++ len = ath10k_wmi_tlv_len(data);
  29309. ++
  29310. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv diag event len %d\n", len);
  29311. ++ trace_ath10k_wmi_diag(ar, data, len);
  29312. ++
  29313. ++ kfree(tb);
  29314. ++ return 0;
  29315. ++}
  29316. ++
  29317. ++/***********/
  29318. ++/* TLV ops */
  29319. ++/***********/
  29320. ++
  29321. ++static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
  29322. ++{
  29323. ++ struct wmi_cmd_hdr *cmd_hdr;
  29324. ++ enum wmi_tlv_event_id id;
  29325. ++
  29326. ++ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
  29327. ++ id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
  29328. ++
  29329. ++ if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
  29330. ++ return;
  29331. ++
  29332. ++ trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
  29333. ++
  29334. ++ switch (id) {
  29335. ++ case WMI_TLV_MGMT_RX_EVENTID:
  29336. ++ ath10k_wmi_event_mgmt_rx(ar, skb);
  29337. ++ /* mgmt_rx() owns the skb now! */
  29338. ++ return;
  29339. ++ case WMI_TLV_SCAN_EVENTID:
  29340. ++ ath10k_wmi_event_scan(ar, skb);
  29341. ++ break;
  29342. ++ case WMI_TLV_CHAN_INFO_EVENTID:
  29343. ++ ath10k_wmi_event_chan_info(ar, skb);
  29344. ++ break;
  29345. ++ case WMI_TLV_ECHO_EVENTID:
  29346. ++ ath10k_wmi_event_echo(ar, skb);
  29347. ++ break;
  29348. ++ case WMI_TLV_DEBUG_MESG_EVENTID:
  29349. ++ ath10k_wmi_event_debug_mesg(ar, skb);
  29350. ++ break;
  29351. ++ case WMI_TLV_UPDATE_STATS_EVENTID:
  29352. ++ ath10k_wmi_event_update_stats(ar, skb);
  29353. ++ break;
  29354. ++ case WMI_TLV_VDEV_START_RESP_EVENTID:
  29355. ++ ath10k_wmi_event_vdev_start_resp(ar, skb);
  29356. ++ break;
  29357. ++ case WMI_TLV_VDEV_STOPPED_EVENTID:
  29358. ++ ath10k_wmi_event_vdev_stopped(ar, skb);
  29359. ++ break;
  29360. ++ case WMI_TLV_PEER_STA_KICKOUT_EVENTID:
  29361. ++ ath10k_wmi_event_peer_sta_kickout(ar, skb);
  29362. ++ break;
  29363. ++ case WMI_TLV_HOST_SWBA_EVENTID:
  29364. ++ ath10k_wmi_event_host_swba(ar, skb);
  29365. ++ break;
  29366. ++ case WMI_TLV_TBTTOFFSET_UPDATE_EVENTID:
  29367. ++ ath10k_wmi_event_tbttoffset_update(ar, skb);
  29368. ++ break;
  29369. ++ case WMI_TLV_PHYERR_EVENTID:
  29370. ++ ath10k_wmi_event_phyerr(ar, skb);
  29371. ++ break;
  29372. ++ case WMI_TLV_ROAM_EVENTID:
  29373. ++ ath10k_wmi_event_roam(ar, skb);
  29374. ++ break;
  29375. ++ case WMI_TLV_PROFILE_MATCH:
  29376. ++ ath10k_wmi_event_profile_match(ar, skb);
  29377. ++ break;
  29378. ++ case WMI_TLV_DEBUG_PRINT_EVENTID:
  29379. ++ ath10k_wmi_event_debug_print(ar, skb);
  29380. ++ break;
  29381. ++ case WMI_TLV_PDEV_QVIT_EVENTID:
  29382. ++ ath10k_wmi_event_pdev_qvit(ar, skb);
  29383. ++ break;
  29384. ++ case WMI_TLV_WLAN_PROFILE_DATA_EVENTID:
  29385. ++ ath10k_wmi_event_wlan_profile_data(ar, skb);
  29386. ++ break;
  29387. ++ case WMI_TLV_RTT_MEASUREMENT_REPORT_EVENTID:
  29388. ++ ath10k_wmi_event_rtt_measurement_report(ar, skb);
  29389. ++ break;
  29390. ++ case WMI_TLV_TSF_MEASUREMENT_REPORT_EVENTID:
  29391. ++ ath10k_wmi_event_tsf_measurement_report(ar, skb);
  29392. ++ break;
  29393. ++ case WMI_TLV_RTT_ERROR_REPORT_EVENTID:
  29394. ++ ath10k_wmi_event_rtt_error_report(ar, skb);
  29395. ++ break;
  29396. ++ case WMI_TLV_WOW_WAKEUP_HOST_EVENTID:
  29397. ++ ath10k_wmi_event_wow_wakeup_host(ar, skb);
  29398. ++ break;
  29399. ++ case WMI_TLV_DCS_INTERFERENCE_EVENTID:
  29400. ++ ath10k_wmi_event_dcs_interference(ar, skb);
  29401. ++ break;
  29402. ++ case WMI_TLV_PDEV_TPC_CONFIG_EVENTID:
  29403. ++ ath10k_wmi_event_pdev_tpc_config(ar, skb);
  29404. ++ break;
  29405. ++ case WMI_TLV_PDEV_FTM_INTG_EVENTID:
  29406. ++ ath10k_wmi_event_pdev_ftm_intg(ar, skb);
  29407. ++ break;
  29408. ++ case WMI_TLV_GTK_OFFLOAD_STATUS_EVENTID:
  29409. ++ ath10k_wmi_event_gtk_offload_status(ar, skb);
  29410. ++ break;
  29411. ++ case WMI_TLV_GTK_REKEY_FAIL_EVENTID:
  29412. ++ ath10k_wmi_event_gtk_rekey_fail(ar, skb);
  29413. ++ break;
  29414. ++ case WMI_TLV_TX_DELBA_COMPLETE_EVENTID:
  29415. ++ ath10k_wmi_event_delba_complete(ar, skb);
  29416. ++ break;
  29417. ++ case WMI_TLV_TX_ADDBA_COMPLETE_EVENTID:
  29418. ++ ath10k_wmi_event_addba_complete(ar, skb);
  29419. ++ break;
  29420. ++ case WMI_TLV_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
  29421. ++ ath10k_wmi_event_vdev_install_key_complete(ar, skb);
  29422. ++ break;
  29423. ++ case WMI_TLV_SERVICE_READY_EVENTID:
  29424. ++ ath10k_wmi_event_service_ready(ar, skb);
  29425. ++ break;
  29426. ++ case WMI_TLV_READY_EVENTID:
  29427. ++ ath10k_wmi_event_ready(ar, skb);
  29428. ++ break;
  29429. ++ case WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID:
  29430. ++ ath10k_wmi_tlv_event_bcn_tx_status(ar, skb);
  29431. ++ break;
  29432. ++ case WMI_TLV_DIAG_DATA_CONTAINER_EVENTID:
  29433. ++ ath10k_wmi_tlv_event_diag_data(ar, skb);
  29434. ++ break;
  29435. ++ case WMI_TLV_DIAG_EVENTID:
  29436. ++ ath10k_wmi_tlv_event_diag(ar, skb);
  29437. ++ break;
  29438. ++ default:
  29439. ++ ath10k_warn(ar, "Unknown eventid: %d\n", id);
  29440. ++ break;
  29441. ++ }
  29442. ++
  29443. ++ dev_kfree_skb(skb);
  29444. ++}
  29445. ++
  29446. ++static int ath10k_wmi_tlv_op_pull_scan_ev(struct ath10k *ar,
  29447. ++ struct sk_buff *skb,
  29448. ++ struct wmi_scan_ev_arg *arg)
  29449. ++{
  29450. ++ const void **tb;
  29451. ++ const struct wmi_scan_event *ev;
  29452. ++ int ret;
  29453. ++
  29454. ++ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
  29455. ++ if (IS_ERR(tb)) {
  29456. ++ ret = PTR_ERR(tb);
  29457. ++ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
  29458. ++ return ret;
  29459. ++ }
  29460. ++
  29461. ++ ev = tb[WMI_TLV_TAG_STRUCT_SCAN_EVENT];
  29462. ++ if (!ev) {
  29463. ++ kfree(tb);
  29464. ++ return -EPROTO;
  29465. ++ }
  29466. ++
  29467. ++ arg->event_type = ev->event_type;
  29468. ++ arg->reason = ev->reason;
  29469. ++ arg->channel_freq = ev->channel_freq;
  29470. ++ arg->scan_req_id = ev->scan_req_id;
  29471. ++ arg->scan_id = ev->scan_id;
  29472. ++ arg->vdev_id = ev->vdev_id;
  29473. ++
  29474. ++ kfree(tb);
  29475. ++ return 0;
  29476. ++}
  29477. ++
  29478. ++static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
  29479. ++ struct sk_buff *skb,
  29480. ++ struct wmi_mgmt_rx_ev_arg *arg)
  29481. ++{
  29482. ++ const void **tb;
  29483. ++ const struct wmi_tlv_mgmt_rx_ev *ev;
  29484. ++ const u8 *frame;
  29485. ++ u32 msdu_len;
  29486. ++ int ret;
  29487. ++
  29488. ++ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
  29489. ++ if (IS_ERR(tb)) {
  29490. ++ ret = PTR_ERR(tb);
  29491. ++ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
  29492. ++ return ret;
  29493. ++ }
  29494. ++
  29495. ++ ev = tb[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR];
  29496. ++ frame = tb[WMI_TLV_TAG_ARRAY_BYTE];
  29497. ++
  29498. ++ if (!ev || !frame) {
  29499. ++ kfree(tb);
  29500. ++ return -EPROTO;
  29501. ++ }
  29502. ++
  29503. ++ arg->channel = ev->channel;
  29504. ++ arg->buf_len = ev->buf_len;
  29505. ++ arg->status = ev->status;
  29506. ++ arg->snr = ev->snr;
  29507. ++ arg->phy_mode = ev->phy_mode;
  29508. ++ arg->rate = ev->rate;
  29509. ++
  29510. ++ msdu_len = __le32_to_cpu(arg->buf_len);
  29511. ++
  29512. ++ if (skb->len < (frame - skb->data) + msdu_len) {
  29513. ++ kfree(tb);
  29514. ++ return -EPROTO;
  29515. ++ }
  29516. ++
  29517. ++ /* shift the sk_buff to point to `frame` */
  29518. ++ skb_trim(skb, 0);
  29519. ++ skb_put(skb, frame - skb->data);
  29520. ++ skb_pull(skb, frame - skb->data);
  29521. ++ skb_put(skb, msdu_len);
  29522. ++
  29523. ++ kfree(tb);
  29524. ++ return 0;
  29525. ++}
  29526. ++
  29527. ++static int ath10k_wmi_tlv_op_pull_ch_info_ev(struct ath10k *ar,
  29528. ++ struct sk_buff *skb,
  29529. ++ struct wmi_ch_info_ev_arg *arg)
  29530. ++{
  29531. ++ const void **tb;
  29532. ++ const struct wmi_chan_info_event *ev;
  29533. ++ int ret;
  29534. ++
  29535. ++ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
  29536. ++ if (IS_ERR(tb)) {
  29537. ++ ret = PTR_ERR(tb);
  29538. ++ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
  29539. ++ return ret;
  29540. ++ }
  29541. ++
  29542. ++ ev = tb[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT];
  29543. ++ if (!ev) {
  29544. ++ kfree(tb);
  29545. ++ return -EPROTO;
  29546. ++ }
  29547. ++
  29548. ++ arg->err_code = ev->err_code;
  29549. ++ arg->freq = ev->freq;
  29550. ++ arg->cmd_flags = ev->cmd_flags;
  29551. ++ arg->noise_floor = ev->noise_floor;
  29552. ++ arg->rx_clear_count = ev->rx_clear_count;
  29553. ++ arg->cycle_count = ev->cycle_count;
  29554. ++
  29555. ++ kfree(tb);
  29556. ++ return 0;
  29557. ++}
  29558. ++
  29559. ++static int
  29560. ++ath10k_wmi_tlv_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
  29561. ++ struct wmi_vdev_start_ev_arg *arg)
  29562. ++{
  29563. ++ const void **tb;
  29564. ++ const struct wmi_vdev_start_response_event *ev;
  29565. ++ int ret;
  29566. ++
  29567. ++ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
  29568. ++ if (IS_ERR(tb)) {
  29569. ++ ret = PTR_ERR(tb);
  29570. ++ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
  29571. ++ return ret;
  29572. ++ }
  29573. ++
  29574. ++ ev = tb[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT];
  29575. ++ if (!ev) {
  29576. ++ kfree(tb);
  29577. ++ return -EPROTO;
  29578. ++ }
  29579. ++
  29580. ++ skb_pull(skb, sizeof(*ev));
  29581. ++ arg->vdev_id = ev->vdev_id;
  29582. ++ arg->req_id = ev->req_id;
  29583. ++ arg->resp_type = ev->resp_type;
  29584. ++ arg->status = ev->status;
  29585. ++
  29586. ++ kfree(tb);
  29587. ++ return 0;
  29588. ++}
  29589. ++
  29590. ++static int ath10k_wmi_tlv_op_pull_peer_kick_ev(struct ath10k *ar,
  29591. ++ struct sk_buff *skb,
  29592. ++ struct wmi_peer_kick_ev_arg *arg)
  29593. ++{
  29594. ++ const void **tb;
  29595. ++ const struct wmi_peer_sta_kickout_event *ev;
  29596. ++ int ret;
  29597. ++
  29598. ++ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
  29599. ++ if (IS_ERR(tb)) {
  29600. ++ ret = PTR_ERR(tb);
  29601. ++ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
  29602. ++ return ret;
  29603. ++ }
  29604. ++
  29605. ++ ev = tb[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT];
  29606. ++ if (!ev) {
  29607. ++ kfree(tb);
  29608. ++ return -EPROTO;
  29609. ++ }
  29610. ++
  29611. ++ arg->mac_addr = ev->peer_macaddr.addr;
  29612. ++
  29613. ++ kfree(tb);
  29614. ++ return 0;
  29615. ++}
  29616. ++
  29617. ++struct wmi_tlv_swba_parse {
  29618. ++ const struct wmi_host_swba_event *ev;
  29619. ++ bool tim_done;
  29620. ++ bool noa_done;
  29621. ++ size_t n_tim;
  29622. ++ size_t n_noa;
  29623. ++ struct wmi_swba_ev_arg *arg;
  29624. ++};
  29625. ++
  29626. ++static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len,
  29627. ++ const void *ptr, void *data)
  29628. ++{
  29629. ++ struct wmi_tlv_swba_parse *swba = data;
  29630. ++
  29631. ++ if (tag != WMI_TLV_TAG_STRUCT_TIM_INFO)
  29632. ++ return -EPROTO;
  29633. ++
  29634. ++ if (swba->n_tim >= ARRAY_SIZE(swba->arg->tim_info))
  29635. ++ return -ENOBUFS;
  29636. ++
  29637. ++ swba->arg->tim_info[swba->n_tim++] = ptr;
  29638. ++ return 0;
  29639. ++}
  29640. ++
  29641. ++static int ath10k_wmi_tlv_swba_noa_parse(struct ath10k *ar, u16 tag, u16 len,
  29642. ++ const void *ptr, void *data)
  29643. ++{
  29644. ++ struct wmi_tlv_swba_parse *swba = data;
  29645. ++
  29646. ++ if (tag != WMI_TLV_TAG_STRUCT_P2P_NOA_INFO)
  29647. ++ return -EPROTO;
  29648. ++
  29649. ++ if (swba->n_noa >= ARRAY_SIZE(swba->arg->noa_info))
  29650. ++ return -ENOBUFS;
  29651. ++
  29652. ++ swba->arg->noa_info[swba->n_noa++] = ptr;
  29653. ++ return 0;
  29654. ++}
  29655. ++
  29656. ++static int ath10k_wmi_tlv_swba_parse(struct ath10k *ar, u16 tag, u16 len,
  29657. ++ const void *ptr, void *data)
  29658. ++{
  29659. ++ struct wmi_tlv_swba_parse *swba = data;
  29660. ++ int ret;
  29661. ++
  29662. ++ switch (tag) {
  29663. ++ case WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT:
  29664. ++ swba->ev = ptr;
  29665. ++ break;
  29666. ++ case WMI_TLV_TAG_ARRAY_STRUCT:
  29667. ++ if (!swba->tim_done) {
  29668. ++ swba->tim_done = true;
  29669. ++ ret = ath10k_wmi_tlv_iter(ar, ptr, len,
  29670. ++ ath10k_wmi_tlv_swba_tim_parse,
  29671. ++ swba);
  29672. ++ if (ret)
  29673. ++ return ret;
  29674. ++ } else if (!swba->noa_done) {
  29675. ++ swba->noa_done = true;
  29676. ++ ret = ath10k_wmi_tlv_iter(ar, ptr, len,
  29677. ++ ath10k_wmi_tlv_swba_noa_parse,
  29678. ++ swba);
  29679. ++ if (ret)
  29680. ++ return ret;
  29681. ++ }
  29682. ++ break;
  29683. ++ default:
  29684. ++ break;
  29685. ++ }
  29686. ++ return 0;
  29687. ++}
  29688. ++
  29689. ++static int ath10k_wmi_tlv_op_pull_swba_ev(struct ath10k *ar,
  29690. ++ struct sk_buff *skb,
  29691. ++ struct wmi_swba_ev_arg *arg)
  29692. ++{
  29693. ++ struct wmi_tlv_swba_parse swba = { .arg = arg };
  29694. ++ u32 map;
  29695. ++ size_t n_vdevs;
  29696. ++ int ret;
  29697. ++
  29698. ++ ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
  29699. ++ ath10k_wmi_tlv_swba_parse, &swba);
  29700. ++ if (ret) {
  29701. ++ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
  29702. ++ return ret;
  29703. ++ }
  29704. ++
  29705. ++ if (!swba.ev)
  29706. ++ return -EPROTO;
  29707. ++
  29708. ++ arg->vdev_map = swba.ev->vdev_map;
  29709. ++
  29710. ++ for (map = __le32_to_cpu(arg->vdev_map), n_vdevs = 0; map; map >>= 1)
  29711. ++ if (map & BIT(0))
  29712. ++ n_vdevs++;
  29713. ++
  29714. ++ if (n_vdevs != swba.n_tim ||
  29715. ++ n_vdevs != swba.n_noa)
  29716. ++ return -EPROTO;
  29717. ++
  29718. ++ return 0;
  29719. ++}
  29720. ++
  29721. ++static int ath10k_wmi_tlv_op_pull_phyerr_ev(struct ath10k *ar,
  29722. ++ struct sk_buff *skb,
  29723. ++ struct wmi_phyerr_ev_arg *arg)
  29724. ++{
  29725. ++ const void **tb;
  29726. ++ const struct wmi_tlv_phyerr_ev *ev;
  29727. ++ const void *phyerrs;
  29728. ++ int ret;
  29729. ++
  29730. ++ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
  29731. ++ if (IS_ERR(tb)) {
  29732. ++ ret = PTR_ERR(tb);
  29733. ++ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
  29734. ++ return ret;
  29735. ++ }
  29736. ++
  29737. ++ ev = tb[WMI_TLV_TAG_STRUCT_COMB_PHYERR_RX_HDR];
  29738. ++ phyerrs = tb[WMI_TLV_TAG_ARRAY_BYTE];
  29739. ++
  29740. ++ if (!ev || !phyerrs) {
  29741. ++ kfree(tb);
  29742. ++ return -EPROTO;
  29743. ++ }
  29744. ++
  29745. ++ arg->num_phyerrs = ev->num_phyerrs;
  29746. ++ arg->tsf_l32 = ev->tsf_l32;
  29747. ++ arg->tsf_u32 = ev->tsf_u32;
  29748. ++ arg->buf_len = ev->buf_len;
  29749. ++ arg->phyerrs = phyerrs;
  29750. ++
  29751. ++ kfree(tb);
  29752. ++ return 0;
  29753. ++}
  29754. ++
  29755. ++#define WMI_TLV_ABI_VER_NS0 0x5F414351
  29756. ++#define WMI_TLV_ABI_VER_NS1 0x00004C4D
  29757. ++#define WMI_TLV_ABI_VER_NS2 0x00000000
  29758. ++#define WMI_TLV_ABI_VER_NS3 0x00000000
  29759. ++
  29760. ++#define WMI_TLV_ABI_VER0_MAJOR 1
  29761. ++#define WMI_TLV_ABI_VER0_MINOR 0
  29762. ++#define WMI_TLV_ABI_VER0 ((((WMI_TLV_ABI_VER0_MAJOR) << 24) & 0xFF000000) | \
  29763. ++ (((WMI_TLV_ABI_VER0_MINOR) << 0) & 0x00FFFFFF))
  29764. ++#define WMI_TLV_ABI_VER1 53
  29765. ++
  29766. ++static int
  29767. ++ath10k_wmi_tlv_parse_mem_reqs(struct ath10k *ar, u16 tag, u16 len,
  29768. ++ const void *ptr, void *data)
  29769. ++{
  29770. ++ struct wmi_svc_rdy_ev_arg *arg = data;
  29771. ++ int i;
  29772. ++
  29773. ++ if (tag != WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ)
  29774. ++ return -EPROTO;
  29775. ++
  29776. ++ for (i = 0; i < ARRAY_SIZE(arg->mem_reqs); i++) {
  29777. ++ if (!arg->mem_reqs[i]) {
  29778. ++ arg->mem_reqs[i] = ptr;
  29779. ++ return 0;
  29780. ++ }
  29781. ++ }
  29782. ++
  29783. ++ return -ENOMEM;
  29784. ++}
  29785. ++
  29786. ++static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
  29787. ++ struct sk_buff *skb,
  29788. ++ struct wmi_svc_rdy_ev_arg *arg)
  29789. ++{
  29790. ++ const void **tb;
  29791. ++ const struct hal_reg_capabilities *reg;
  29792. ++ const struct wmi_tlv_svc_rdy_ev *ev;
  29793. ++ const __le32 *svc_bmap;
  29794. ++ const struct wlan_host_mem_req *mem_reqs;
  29795. ++ int ret;
  29796. ++
  29797. ++ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
  29798. ++ if (IS_ERR(tb)) {
  29799. ++ ret = PTR_ERR(tb);
  29800. ++ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
  29801. ++ return ret;
  29802. ++ }
  29803. ++
  29804. ++ ev = tb[WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT];
  29805. ++ reg = tb[WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES];
  29806. ++ svc_bmap = tb[WMI_TLV_TAG_ARRAY_UINT32];
  29807. ++ mem_reqs = tb[WMI_TLV_TAG_ARRAY_STRUCT];
  29808. ++
  29809. ++ if (!ev || !reg || !svc_bmap || !mem_reqs) {
  29810. ++ kfree(tb);
  29811. ++ return -EPROTO;
  29812. ++ }
  29813. ++
  29814. ++ /* This is an internal ABI compatibility check for WMI TLV so check it
  29815. ++ * here instead of the generic WMI code.
  29816. ++ */
  29817. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  29818. ++ "wmi tlv abi 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x\n",
  29819. ++ __le32_to_cpu(ev->abi.abi_ver0), WMI_TLV_ABI_VER0,
  29820. ++ __le32_to_cpu(ev->abi.abi_ver_ns0), WMI_TLV_ABI_VER_NS0,
  29821. ++ __le32_to_cpu(ev->abi.abi_ver_ns1), WMI_TLV_ABI_VER_NS1,
  29822. ++ __le32_to_cpu(ev->abi.abi_ver_ns2), WMI_TLV_ABI_VER_NS2,
  29823. ++ __le32_to_cpu(ev->abi.abi_ver_ns3), WMI_TLV_ABI_VER_NS3);
  29824. ++
  29825. ++ if (__le32_to_cpu(ev->abi.abi_ver0) != WMI_TLV_ABI_VER0 ||
  29826. ++ __le32_to_cpu(ev->abi.abi_ver_ns0) != WMI_TLV_ABI_VER_NS0 ||
  29827. ++ __le32_to_cpu(ev->abi.abi_ver_ns1) != WMI_TLV_ABI_VER_NS1 ||
  29828. ++ __le32_to_cpu(ev->abi.abi_ver_ns2) != WMI_TLV_ABI_VER_NS2 ||
  29829. ++ __le32_to_cpu(ev->abi.abi_ver_ns3) != WMI_TLV_ABI_VER_NS3) {
  29830. ++ kfree(tb);
  29831. ++ return -ENOTSUPP;
  29832. ++ }
  29833. ++
  29834. ++ arg->min_tx_power = ev->hw_min_tx_power;
  29835. ++ arg->max_tx_power = ev->hw_max_tx_power;
  29836. ++ arg->ht_cap = ev->ht_cap_info;
  29837. ++ arg->vht_cap = ev->vht_cap_info;
  29838. ++ arg->sw_ver0 = ev->abi.abi_ver0;
  29839. ++ arg->sw_ver1 = ev->abi.abi_ver1;
  29840. ++ arg->fw_build = ev->fw_build_vers;
  29841. ++ arg->phy_capab = ev->phy_capability;
  29842. ++ arg->num_rf_chains = ev->num_rf_chains;
  29843. ++ arg->eeprom_rd = reg->eeprom_rd;
  29844. ++ arg->num_mem_reqs = ev->num_mem_reqs;
  29845. ++ arg->service_map = svc_bmap;
  29846. ++ arg->service_map_len = ath10k_wmi_tlv_len(svc_bmap);
  29847. ++
  29848. ++ ret = ath10k_wmi_tlv_iter(ar, mem_reqs, ath10k_wmi_tlv_len(mem_reqs),
  29849. ++ ath10k_wmi_tlv_parse_mem_reqs, arg);
  29850. ++ if (ret) {
  29851. ++ kfree(tb);
  29852. ++ ath10k_warn(ar, "failed to parse mem_reqs tlv: %d\n", ret);
  29853. ++ return ret;
  29854. ++ }
  29855. ++
  29856. ++ kfree(tb);
  29857. ++ return 0;
  29858. ++}
  29859. ++
  29860. ++static int ath10k_wmi_tlv_op_pull_rdy_ev(struct ath10k *ar,
  29861. ++ struct sk_buff *skb,
  29862. ++ struct wmi_rdy_ev_arg *arg)
  29863. ++{
  29864. ++ const void **tb;
  29865. ++ const struct wmi_tlv_rdy_ev *ev;
  29866. ++ int ret;
  29867. ++
  29868. ++ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
  29869. ++ if (IS_ERR(tb)) {
  29870. ++ ret = PTR_ERR(tb);
  29871. ++ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
  29872. ++ return ret;
  29873. ++ }
  29874. ++
  29875. ++ ev = tb[WMI_TLV_TAG_STRUCT_READY_EVENT];
  29876. ++ if (!ev) {
  29877. ++ kfree(tb);
  29878. ++ return -EPROTO;
  29879. ++ }
  29880. ++
  29881. ++ arg->sw_version = ev->abi.abi_ver0;
  29882. ++ arg->abi_version = ev->abi.abi_ver1;
  29883. ++ arg->status = ev->status;
  29884. ++ arg->mac_addr = ev->mac_addr.addr;
  29885. ++
  29886. ++ kfree(tb);
  29887. ++ return 0;
  29888. ++}
  29889. ++
  29890. ++static void ath10k_wmi_tlv_pull_vdev_stats(const struct wmi_tlv_vdev_stats *src,
  29891. ++ struct ath10k_fw_stats_vdev *dst)
  29892. ++{
  29893. ++ int i;
  29894. ++
  29895. ++ dst->vdev_id = __le32_to_cpu(src->vdev_id);
  29896. ++ dst->beacon_snr = __le32_to_cpu(src->beacon_snr);
  29897. ++ dst->data_snr = __le32_to_cpu(src->data_snr);
  29898. ++ dst->num_rx_frames = __le32_to_cpu(src->num_rx_frames);
  29899. ++ dst->num_rts_fail = __le32_to_cpu(src->num_rts_fail);
  29900. ++ dst->num_rts_success = __le32_to_cpu(src->num_rts_success);
  29901. ++ dst->num_rx_err = __le32_to_cpu(src->num_rx_err);
  29902. ++ dst->num_rx_discard = __le32_to_cpu(src->num_rx_discard);
  29903. ++ dst->num_tx_not_acked = __le32_to_cpu(src->num_tx_not_acked);
  29904. ++
  29905. ++ for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++)
  29906. ++ dst->num_tx_frames[i] =
  29907. ++ __le32_to_cpu(src->num_tx_frames[i]);
  29908. ++
  29909. ++ for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++)
  29910. ++ dst->num_tx_frames_retries[i] =
  29911. ++ __le32_to_cpu(src->num_tx_frames_retries[i]);
  29912. ++
  29913. ++ for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++)
  29914. ++ dst->num_tx_frames_failures[i] =
  29915. ++ __le32_to_cpu(src->num_tx_frames_failures[i]);
  29916. ++
  29917. ++ for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++)
  29918. ++ dst->tx_rate_history[i] =
  29919. ++ __le32_to_cpu(src->tx_rate_history[i]);
  29920. ++
  29921. ++ for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++)
  29922. ++ dst->beacon_rssi_history[i] =
  29923. ++ __le32_to_cpu(src->beacon_rssi_history[i]);
  29924. ++}
  29925. ++
  29926. ++static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
  29927. ++ struct sk_buff *skb,
  29928. ++ struct ath10k_fw_stats *stats)
  29929. ++{
  29930. ++ const void **tb;
  29931. ++ const struct wmi_tlv_stats_ev *ev;
  29932. ++ const void *data;
  29933. ++ u32 num_pdev_stats;
  29934. ++ u32 num_vdev_stats;
  29935. ++ u32 num_peer_stats;
  29936. ++ u32 num_bcnflt_stats;
  29937. ++ u32 num_chan_stats;
  29938. ++ size_t data_len;
  29939. ++ int ret;
  29940. ++ int i;
  29941. ++
  29942. ++ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
  29943. ++ if (IS_ERR(tb)) {
  29944. ++ ret = PTR_ERR(tb);
  29945. ++ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
  29946. ++ return ret;
  29947. ++ }
  29948. ++
  29949. ++ ev = tb[WMI_TLV_TAG_STRUCT_STATS_EVENT];
  29950. ++ data = tb[WMI_TLV_TAG_ARRAY_BYTE];
  29951. ++
  29952. ++ if (!ev || !data) {
  29953. ++ kfree(tb);
  29954. ++ return -EPROTO;
  29955. ++ }
  29956. ++
  29957. ++ data_len = ath10k_wmi_tlv_len(data);
  29958. ++ num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
  29959. ++ num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
  29960. ++ num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
  29961. ++ num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
  29962. ++ num_chan_stats = __le32_to_cpu(ev->num_chan_stats);
  29963. ++
  29964. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  29965. ++ "wmi tlv stats update pdev %i vdev %i peer %i bcnflt %i chan %i\n",
  29966. ++ num_pdev_stats, num_vdev_stats, num_peer_stats,
  29967. ++ num_bcnflt_stats, num_chan_stats);
  29968. ++
  29969. ++ for (i = 0; i < num_pdev_stats; i++) {
  29970. ++ const struct wmi_pdev_stats *src;
  29971. ++ struct ath10k_fw_stats_pdev *dst;
  29972. ++
  29973. ++ src = data;
  29974. ++ if (data_len < sizeof(*src))
  29975. ++ return -EPROTO;
  29976. ++
  29977. ++ data += sizeof(*src);
  29978. ++ data_len -= sizeof(*src);
  29979. ++
  29980. ++ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
  29981. ++ if (!dst)
  29982. ++ continue;
  29983. ++
  29984. ++ ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
  29985. ++ ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
  29986. ++ ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
  29987. ++ list_add_tail(&dst->list, &stats->pdevs);
  29988. ++ }
  29989. ++
  29990. ++ for (i = 0; i < num_vdev_stats; i++) {
  29991. ++ const struct wmi_tlv_vdev_stats *src;
  29992. ++ struct ath10k_fw_stats_vdev *dst;
  29993. ++
  29994. ++ src = data;
  29995. ++ if (data_len < sizeof(*src))
  29996. ++ return -EPROTO;
  29997. ++
  29998. ++ data += sizeof(*src);
  29999. ++ data_len -= sizeof(*src);
  30000. ++
  30001. ++ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
  30002. ++ if (!dst)
  30003. ++ continue;
  30004. ++
  30005. ++ ath10k_wmi_tlv_pull_vdev_stats(src, dst);
  30006. ++ list_add_tail(&dst->list, &stats->vdevs);
  30007. ++ }
  30008. ++
  30009. ++ for (i = 0; i < num_peer_stats; i++) {
  30010. ++ const struct wmi_10x_peer_stats *src;
  30011. ++ struct ath10k_fw_stats_peer *dst;
  30012. ++
  30013. ++ src = data;
  30014. ++ if (data_len < sizeof(*src))
  30015. ++ return -EPROTO;
  30016. ++
  30017. ++ data += sizeof(*src);
  30018. ++ data_len -= sizeof(*src);
  30019. ++
  30020. ++ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
  30021. ++ if (!dst)
  30022. ++ continue;
  30023. ++
  30024. ++ ath10k_wmi_pull_peer_stats(&src->old, dst);
  30025. ++ dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
  30026. ++ list_add_tail(&dst->list, &stats->peers);
  30027. ++ }
  30028. ++
  30029. ++ kfree(tb);
  30030. ++ return 0;
  30031. ++}
  30032. ++
  30033. ++static struct sk_buff *
  30034. ++ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
  30035. ++{
  30036. ++ struct wmi_tlv_pdev_suspend *cmd;
  30037. ++ struct wmi_tlv *tlv;
  30038. ++ struct sk_buff *skb;
  30039. ++
  30040. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  30041. ++ if (!skb)
  30042. ++ return ERR_PTR(-ENOMEM);
  30043. ++
  30044. ++ tlv = (void *)skb->data;
  30045. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SUSPEND_CMD);
  30046. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  30047. ++ cmd = (void *)tlv->value;
  30048. ++ cmd->opt = __cpu_to_le32(opt);
  30049. ++
  30050. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev suspend\n");
  30051. ++ return skb;
  30052. ++}
  30053. ++
  30054. ++static struct sk_buff *
  30055. ++ath10k_wmi_tlv_op_gen_pdev_resume(struct ath10k *ar)
  30056. ++{
  30057. ++ struct wmi_tlv_resume_cmd *cmd;
  30058. ++ struct wmi_tlv *tlv;
  30059. ++ struct sk_buff *skb;
  30060. ++
  30061. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  30062. ++ if (!skb)
  30063. ++ return ERR_PTR(-ENOMEM);
  30064. ++
  30065. ++ tlv = (void *)skb->data;
  30066. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_RESUME_CMD);
  30067. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  30068. ++ cmd = (void *)tlv->value;
  30069. ++ cmd->reserved = __cpu_to_le32(0);
  30070. ++
  30071. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev resume\n");
  30072. ++ return skb;
  30073. ++}
  30074. ++
  30075. ++static struct sk_buff *
  30076. ++ath10k_wmi_tlv_op_gen_pdev_set_rd(struct ath10k *ar,
  30077. ++ u16 rd, u16 rd2g, u16 rd5g,
  30078. ++ u16 ctl2g, u16 ctl5g,
  30079. ++ enum wmi_dfs_region dfs_reg)
  30080. ++{
  30081. ++ struct wmi_tlv_pdev_set_rd_cmd *cmd;
  30082. ++ struct wmi_tlv *tlv;
  30083. ++ struct sk_buff *skb;
  30084. ++
  30085. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  30086. ++ if (!skb)
  30087. ++ return ERR_PTR(-ENOMEM);
  30088. ++
  30089. ++ tlv = (void *)skb->data;
  30090. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_REGDOMAIN_CMD);
  30091. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  30092. ++ cmd = (void *)tlv->value;
  30093. ++ cmd->regd = __cpu_to_le32(rd);
  30094. ++ cmd->regd_2ghz = __cpu_to_le32(rd2g);
  30095. ++ cmd->regd_5ghz = __cpu_to_le32(rd5g);
  30096. ++ cmd->conform_limit_2ghz = __cpu_to_le32(rd2g);
  30097. ++ cmd->conform_limit_5ghz = __cpu_to_le32(rd5g);
  30098. ++
  30099. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set rd\n");
  30100. ++ return skb;
  30101. ++}
  30102. ++
  30103. ++static struct sk_buff *
  30104. ++ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id,
  30105. ++ u32 param_value)
  30106. ++{
  30107. ++ struct wmi_tlv_pdev_set_param_cmd *cmd;
  30108. ++ struct wmi_tlv *tlv;
  30109. ++ struct sk_buff *skb;
  30110. ++
  30111. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  30112. ++ if (!skb)
  30113. ++ return ERR_PTR(-ENOMEM);
  30114. ++
  30115. ++ tlv = (void *)skb->data;
  30116. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_PARAM_CMD);
  30117. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  30118. ++ cmd = (void *)tlv->value;
  30119. ++ cmd->param_id = __cpu_to_le32(param_id);
  30120. ++ cmd->param_value = __cpu_to_le32(param_value);
  30121. ++
  30122. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set param\n");
  30123. ++ return skb;
  30124. ++}
  30125. ++
  30126. ++static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
  30127. ++{
  30128. ++ struct sk_buff *skb;
  30129. ++ struct wmi_tlv *tlv;
  30130. ++ struct wmi_tlv_init_cmd *cmd;
  30131. ++ struct wmi_tlv_resource_config *cfg;
  30132. ++ struct wmi_host_mem_chunks *chunks;
  30133. ++ size_t len, chunks_len;
  30134. ++ void *ptr;
  30135. ++
  30136. ++ chunks_len = ar->wmi.num_mem_chunks * sizeof(struct host_memory_chunk);
  30137. ++ len = (sizeof(*tlv) + sizeof(*cmd)) +
  30138. ++ (sizeof(*tlv) + sizeof(*cfg)) +
  30139. ++ (sizeof(*tlv) + chunks_len);
  30140. ++
  30141. ++ skb = ath10k_wmi_alloc_skb(ar, len);
  30142. ++ if (!skb)
  30143. ++ return ERR_PTR(-ENOMEM);
  30144. ++
  30145. ++ ptr = skb->data;
  30146. ++
  30147. ++ tlv = ptr;
  30148. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_INIT_CMD);
  30149. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  30150. ++ cmd = (void *)tlv->value;
  30151. ++ ptr += sizeof(*tlv);
  30152. ++ ptr += sizeof(*cmd);
  30153. ++
  30154. ++ tlv = ptr;
  30155. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESOURCE_CONFIG);
  30156. ++ tlv->len = __cpu_to_le16(sizeof(*cfg));
  30157. ++ cfg = (void *)tlv->value;
  30158. ++ ptr += sizeof(*tlv);
  30159. ++ ptr += sizeof(*cfg);
  30160. ++
  30161. ++ tlv = ptr;
  30162. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
  30163. ++ tlv->len = __cpu_to_le16(chunks_len);
  30164. ++ chunks = (void *)tlv->value;
  30165. ++
  30166. ++ ptr += sizeof(*tlv);
  30167. ++ ptr += chunks_len;
  30168. ++
  30169. ++ cmd->abi.abi_ver0 = __cpu_to_le32(WMI_TLV_ABI_VER0);
  30170. ++ cmd->abi.abi_ver1 = __cpu_to_le32(WMI_TLV_ABI_VER1);
  30171. ++ cmd->abi.abi_ver_ns0 = __cpu_to_le32(WMI_TLV_ABI_VER_NS0);
  30172. ++ cmd->abi.abi_ver_ns1 = __cpu_to_le32(WMI_TLV_ABI_VER_NS1);
  30173. ++ cmd->abi.abi_ver_ns2 = __cpu_to_le32(WMI_TLV_ABI_VER_NS2);
  30174. ++ cmd->abi.abi_ver_ns3 = __cpu_to_le32(WMI_TLV_ABI_VER_NS3);
  30175. ++ cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
  30176. ++
  30177. ++ cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
  30178. ++ cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
  30179. ++
  30180. ++ if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
  30181. ++ cfg->num_offload_peers = __cpu_to_le32(3);
  30182. ++ cfg->num_offload_reorder_bufs = __cpu_to_le32(3);
  30183. ++ } else {
  30184. ++ cfg->num_offload_peers = __cpu_to_le32(0);
  30185. ++ cfg->num_offload_reorder_bufs = __cpu_to_le32(0);
  30186. ++ }
  30187. ++
  30188. ++ cfg->num_peer_keys = __cpu_to_le32(2);
  30189. ++ cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
  30190. ++ cfg->ast_skid_limit = __cpu_to_le32(0x10);
  30191. ++ cfg->tx_chain_mask = __cpu_to_le32(0x7);
  30192. ++ cfg->rx_chain_mask = __cpu_to_le32(0x7);
  30193. ++ cfg->rx_timeout_pri[0] = __cpu_to_le32(0x64);
  30194. ++ cfg->rx_timeout_pri[1] = __cpu_to_le32(0x64);
  30195. ++ cfg->rx_timeout_pri[2] = __cpu_to_le32(0x64);
  30196. ++ cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
  30197. ++ cfg->rx_decap_mode = __cpu_to_le32(1);
  30198. ++ cfg->scan_max_pending_reqs = __cpu_to_le32(4);
  30199. ++ cfg->bmiss_offload_max_vdev = __cpu_to_le32(3);
  30200. ++ cfg->roam_offload_max_vdev = __cpu_to_le32(3);
  30201. ++ cfg->roam_offload_max_ap_profiles = __cpu_to_le32(8);
  30202. ++ cfg->num_mcast_groups = __cpu_to_le32(0);
  30203. ++ cfg->num_mcast_table_elems = __cpu_to_le32(0);
  30204. ++ cfg->mcast2ucast_mode = __cpu_to_le32(0);
  30205. ++ cfg->tx_dbg_log_size = __cpu_to_le32(0x400);
  30206. ++ cfg->num_wds_entries = __cpu_to_le32(0x20);
  30207. ++ cfg->dma_burst_size = __cpu_to_le32(0);
  30208. ++ cfg->mac_aggr_delim = __cpu_to_le32(0);
  30209. ++ cfg->rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(0);
  30210. ++ cfg->vow_config = __cpu_to_le32(0);
  30211. ++ cfg->gtk_offload_max_vdev = __cpu_to_le32(2);
  30212. ++ cfg->num_msdu_desc = __cpu_to_le32(TARGET_TLV_NUM_MSDU_DESC);
  30213. ++ cfg->max_frag_entries = __cpu_to_le32(2);
  30214. ++ cfg->num_tdls_vdevs = __cpu_to_le32(1);
  30215. ++ cfg->num_tdls_conn_table_entries = __cpu_to_le32(0x20);
  30216. ++ cfg->beacon_tx_offload_max_vdev = __cpu_to_le32(2);
  30217. ++ cfg->num_multicast_filter_entries = __cpu_to_le32(5);
  30218. ++ cfg->num_wow_filters = __cpu_to_le32(0x16);
  30219. ++ cfg->num_keep_alive_pattern = __cpu_to_le32(6);
  30220. ++ cfg->keep_alive_pattern_size = __cpu_to_le32(0);
  30221. ++ cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
  30222. ++ cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1);
  30223. ++
  30224. ++ ath10k_wmi_put_host_mem_chunks(ar, chunks);
  30225. ++
  30226. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv init\n");
  30227. ++ return skb;
  30228. ++}
  30229. ++
  30230. ++static struct sk_buff *
  30231. ++ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
  30232. ++ const struct wmi_start_scan_arg *arg)
  30233. ++{
  30234. ++ struct wmi_tlv_start_scan_cmd *cmd;
  30235. ++ struct wmi_tlv *tlv;
  30236. ++ struct sk_buff *skb;
  30237. ++ size_t len, chan_len, ssid_len, bssid_len, ie_len;
  30238. ++ __le32 *chans;
  30239. ++ struct wmi_ssid *ssids;
  30240. ++ struct wmi_mac_addr *addrs;
  30241. ++ void *ptr;
  30242. ++ int i, ret;
  30243. ++
  30244. ++ ret = ath10k_wmi_start_scan_verify(arg);
  30245. ++ if (ret)
  30246. ++ return ERR_PTR(ret);
  30247. ++
  30248. ++ chan_len = arg->n_channels * sizeof(__le32);
  30249. ++ ssid_len = arg->n_ssids * sizeof(struct wmi_ssid);
  30250. ++ bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
  30251. ++ ie_len = roundup(arg->ie_len, 4);
  30252. ++ len = (sizeof(*tlv) + sizeof(*cmd)) +
  30253. ++ (arg->n_channels ? sizeof(*tlv) + chan_len : 0) +
  30254. ++ (arg->n_ssids ? sizeof(*tlv) + ssid_len : 0) +
  30255. ++ (arg->n_bssids ? sizeof(*tlv) + bssid_len : 0) +
  30256. ++ (arg->ie_len ? sizeof(*tlv) + ie_len : 0);
  30257. ++
  30258. ++ skb = ath10k_wmi_alloc_skb(ar, len);
  30259. ++ if (!skb)
  30260. ++ return ERR_PTR(-ENOMEM);
  30261. ++
  30262. ++ ptr = (void *)skb->data;
  30263. ++ tlv = ptr;
  30264. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_START_SCAN_CMD);
  30265. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  30266. ++ cmd = (void *)tlv->value;
  30267. ++
  30268. ++ ath10k_wmi_put_start_scan_common(&cmd->common, arg);
  30269. ++ cmd->burst_duration_ms = __cpu_to_le32(0);
  30270. ++ cmd->num_channels = __cpu_to_le32(arg->n_channels);
  30271. ++ cmd->num_ssids = __cpu_to_le32(arg->n_ssids);
  30272. ++ cmd->num_bssids = __cpu_to_le32(arg->n_bssids);
  30273. ++ cmd->ie_len = __cpu_to_le32(arg->ie_len);
  30274. ++ cmd->num_probes = __cpu_to_le32(3);
  30275. ++
  30276. ++ /* FIXME: There are some scan flag inconsistencies across firmwares,
  30277. ++ * e.g. WMI-TLV inverts the logic behind the following flag.
  30278. ++ */
  30279. ++ cmd->common.scan_ctrl_flags ^= __cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
  30280. ++
  30281. ++ ptr += sizeof(*tlv);
  30282. ++ ptr += sizeof(*cmd);
  30283. ++
  30284. ++ tlv = ptr;
  30285. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
  30286. ++ tlv->len = __cpu_to_le16(chan_len);
  30287. ++ chans = (void *)tlv->value;
  30288. ++ for (i = 0; i < arg->n_channels; i++)
  30289. ++ chans[i] = __cpu_to_le32(arg->channels[i]);
  30290. ++
  30291. ++ ptr += sizeof(*tlv);
  30292. ++ ptr += chan_len;
  30293. ++
  30294. ++ tlv = ptr;
  30295. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
  30296. ++ tlv->len = __cpu_to_le16(ssid_len);
  30297. ++ ssids = (void *)tlv->value;
  30298. ++ for (i = 0; i < arg->n_ssids; i++) {
  30299. ++ ssids[i].ssid_len = __cpu_to_le32(arg->ssids[i].len);
  30300. ++ memcpy(ssids[i].ssid, arg->ssids[i].ssid, arg->ssids[i].len);
  30301. ++ }
  30302. ++
  30303. ++ ptr += sizeof(*tlv);
  30304. ++ ptr += ssid_len;
  30305. ++
  30306. ++ tlv = ptr;
  30307. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
  30308. ++ tlv->len = __cpu_to_le16(bssid_len);
  30309. ++ addrs = (void *)tlv->value;
  30310. ++ for (i = 0; i < arg->n_bssids; i++)
  30311. ++ ether_addr_copy(addrs[i].addr, arg->bssids[i].bssid);
  30312. ++
  30313. ++ ptr += sizeof(*tlv);
  30314. ++ ptr += bssid_len;
  30315. ++
  30316. ++ tlv = ptr;
  30317. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
  30318. ++ tlv->len = __cpu_to_le16(ie_len);
  30319. ++ memcpy(tlv->value, arg->ie, arg->ie_len);
  30320. ++
  30321. ++ ptr += sizeof(*tlv);
  30322. ++ ptr += ie_len;
  30323. ++
  30324. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start scan\n");
  30325. ++ return skb;
  30326. ++}
  30327. ++
  30328. ++static struct sk_buff *
  30329. ++ath10k_wmi_tlv_op_gen_stop_scan(struct ath10k *ar,
  30330. ++ const struct wmi_stop_scan_arg *arg)
  30331. ++{
  30332. ++ struct wmi_stop_scan_cmd *cmd;
  30333. ++ struct wmi_tlv *tlv;
  30334. ++ struct sk_buff *skb;
  30335. ++ u32 scan_id;
  30336. ++ u32 req_id;
  30337. ++
  30338. ++ if (arg->req_id > 0xFFF)
  30339. ++ return ERR_PTR(-EINVAL);
  30340. ++ if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
  30341. ++ return ERR_PTR(-EINVAL);
  30342. ++
  30343. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  30344. ++ if (!skb)
  30345. ++ return ERR_PTR(-ENOMEM);
  30346. ++
  30347. ++ scan_id = arg->u.scan_id;
  30348. ++ scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
  30349. ++
  30350. ++ req_id = arg->req_id;
  30351. ++ req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
  30352. ++
  30353. ++ tlv = (void *)skb->data;
  30354. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STOP_SCAN_CMD);
  30355. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  30356. ++ cmd = (void *)tlv->value;
  30357. ++ cmd->req_type = __cpu_to_le32(arg->req_type);
  30358. ++ cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
  30359. ++ cmd->scan_id = __cpu_to_le32(scan_id);
  30360. ++ cmd->scan_req_id = __cpu_to_le32(req_id);
  30361. ++
  30362. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop scan\n");
  30363. ++ return skb;
  30364. ++}
  30365. ++
  30366. ++static struct sk_buff *
  30367. ++ath10k_wmi_tlv_op_gen_vdev_create(struct ath10k *ar,
  30368. ++ u32 vdev_id,
  30369. ++ enum wmi_vdev_type vdev_type,
  30370. ++ enum wmi_vdev_subtype vdev_subtype,
  30371. ++ const u8 mac_addr[ETH_ALEN])
  30372. ++{
  30373. ++ struct wmi_vdev_create_cmd *cmd;
  30374. ++ struct wmi_tlv *tlv;
  30375. ++ struct sk_buff *skb;
  30376. ++
  30377. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  30378. ++ if (!skb)
  30379. ++ return ERR_PTR(-ENOMEM);
  30380. ++
  30381. ++ tlv = (void *)skb->data;
  30382. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_CREATE_CMD);
  30383. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  30384. ++ cmd = (void *)tlv->value;
  30385. ++ cmd->vdev_id = __cpu_to_le32(vdev_id);
  30386. ++ cmd->vdev_type = __cpu_to_le32(vdev_type);
  30387. ++ cmd->vdev_subtype = __cpu_to_le32(vdev_subtype);
  30388. ++ ether_addr_copy(cmd->vdev_macaddr.addr, mac_addr);
  30389. ++
  30390. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev create\n");
  30391. ++ return skb;
  30392. ++}
  30393. ++
  30394. ++static struct sk_buff *
  30395. ++ath10k_wmi_tlv_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
  30396. ++{
  30397. ++ struct wmi_vdev_delete_cmd *cmd;
  30398. ++ struct wmi_tlv *tlv;
  30399. ++ struct sk_buff *skb;
  30400. ++
  30401. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  30402. ++ if (!skb)
  30403. ++ return ERR_PTR(-ENOMEM);
  30404. ++
  30405. ++ tlv = (void *)skb->data;
  30406. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DELETE_CMD);
  30407. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  30408. ++ cmd = (void *)tlv->value;
  30409. ++ cmd->vdev_id = __cpu_to_le32(vdev_id);
  30410. ++
  30411. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev delete\n");
  30412. ++ return skb;
  30413. ++}
  30414. ++
  30415. ++static struct sk_buff *
  30416. ++ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
  30417. ++ const struct wmi_vdev_start_request_arg *arg,
  30418. ++ bool restart)
  30419. ++{
  30420. ++ struct wmi_tlv_vdev_start_cmd *cmd;
  30421. ++ struct wmi_channel *ch;
  30422. ++ struct wmi_p2p_noa_descriptor *noa;
  30423. ++ struct wmi_tlv *tlv;
  30424. ++ struct sk_buff *skb;
  30425. ++ size_t len;
  30426. ++ void *ptr;
  30427. ++ u32 flags = 0;
  30428. ++
  30429. ++ if (WARN_ON(arg->ssid && arg->ssid_len == 0))
  30430. ++ return ERR_PTR(-EINVAL);
  30431. ++ if (WARN_ON(arg->hidden_ssid && !arg->ssid))
  30432. ++ return ERR_PTR(-EINVAL);
  30433. ++ if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
  30434. ++ return ERR_PTR(-EINVAL);
  30435. ++
  30436. ++ len = (sizeof(*tlv) + sizeof(*cmd)) +
  30437. ++ (sizeof(*tlv) + sizeof(*ch)) +
  30438. ++ (sizeof(*tlv) + 0);
  30439. ++ skb = ath10k_wmi_alloc_skb(ar, len);
  30440. ++ if (!skb)
  30441. ++ return ERR_PTR(-ENOMEM);
  30442. ++
  30443. ++ if (arg->hidden_ssid)
  30444. ++ flags |= WMI_VDEV_START_HIDDEN_SSID;
  30445. ++ if (arg->pmf_enabled)
  30446. ++ flags |= WMI_VDEV_START_PMF_ENABLED;
  30447. ++
  30448. ++ ptr = (void *)skb->data;
  30449. ++
  30450. ++ tlv = ptr;
  30451. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_START_REQUEST_CMD);
  30452. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  30453. ++ cmd = (void *)tlv->value;
  30454. ++ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
  30455. ++ cmd->bcn_intval = __cpu_to_le32(arg->bcn_intval);
  30456. ++ cmd->dtim_period = __cpu_to_le32(arg->dtim_period);
  30457. ++ cmd->flags = __cpu_to_le32(flags);
  30458. ++ cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate);
  30459. ++ cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power);
  30460. ++ cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack);
  30461. ++
  30462. ++ if (arg->ssid) {
  30463. ++ cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
  30464. ++ memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
  30465. ++ }
  30466. ++
  30467. ++ ptr += sizeof(*tlv);
  30468. ++ ptr += sizeof(*cmd);
  30469. ++
  30470. ++ tlv = ptr;
  30471. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
  30472. ++ tlv->len = __cpu_to_le16(sizeof(*ch));
  30473. ++ ch = (void *)tlv->value;
  30474. ++ ath10k_wmi_put_wmi_channel(ch, &arg->channel);
  30475. ++
  30476. ++ ptr += sizeof(*tlv);
  30477. ++ ptr += sizeof(*ch);
  30478. ++
  30479. ++ tlv = ptr;
  30480. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
  30481. ++ tlv->len = 0;
  30482. ++ noa = (void *)tlv->value;
  30483. ++
  30484. ++ /* Note: This is a nested TLV containing:
  30485. ++ * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
  30486. ++ */
  30487. ++
  30488. ++ ptr += sizeof(*tlv);
  30489. ++ ptr += 0;
  30490. ++
  30491. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev start\n");
  30492. ++ return skb;
  30493. ++}
  30494. ++
  30495. ++static struct sk_buff *
  30496. ++ath10k_wmi_tlv_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
  30497. ++{
  30498. ++ struct wmi_vdev_stop_cmd *cmd;
  30499. ++ struct wmi_tlv *tlv;
  30500. ++ struct sk_buff *skb;
  30501. ++
  30502. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  30503. ++ if (!skb)
  30504. ++ return ERR_PTR(-ENOMEM);
  30505. ++
  30506. ++ tlv = (void *)skb->data;
  30507. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_STOP_CMD);
  30508. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  30509. ++ cmd = (void *)tlv->value;
  30510. ++ cmd->vdev_id = __cpu_to_le32(vdev_id);
  30511. ++
  30512. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev stop\n");
  30513. ++ return skb;
  30514. ++}
  30515. ++
  30516. ++static struct sk_buff *
  30517. ++ath10k_wmi_tlv_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
  30518. ++ const u8 *bssid)
  30519. ++
  30520. ++{
  30521. ++ struct wmi_vdev_up_cmd *cmd;
  30522. ++ struct wmi_tlv *tlv;
  30523. ++ struct sk_buff *skb;
  30524. ++
  30525. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  30526. ++ if (!skb)
  30527. ++ return ERR_PTR(-ENOMEM);
  30528. ++
  30529. ++ tlv = (void *)skb->data;
  30530. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_UP_CMD);
  30531. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  30532. ++ cmd = (void *)tlv->value;
  30533. ++ cmd->vdev_id = __cpu_to_le32(vdev_id);
  30534. ++ cmd->vdev_assoc_id = __cpu_to_le32(aid);
  30535. ++ ether_addr_copy(cmd->vdev_bssid.addr, bssid);
  30536. ++
  30537. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev up\n");
  30538. ++ return skb;
  30539. ++}
  30540. ++
  30541. ++static struct sk_buff *
  30542. ++ath10k_wmi_tlv_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
  30543. ++{
  30544. ++ struct wmi_vdev_down_cmd *cmd;
  30545. ++ struct wmi_tlv *tlv;
  30546. ++ struct sk_buff *skb;
  30547. ++
  30548. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  30549. ++ if (!skb)
  30550. ++ return ERR_PTR(-ENOMEM);
  30551. ++
  30552. ++ tlv = (void *)skb->data;
  30553. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DOWN_CMD);
  30554. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  30555. ++ cmd = (void *)tlv->value;
  30556. ++ cmd->vdev_id = __cpu_to_le32(vdev_id);
  30557. ++
  30558. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev down\n");
  30559. ++ return skb;
  30560. ++}
  30561. ++
  30562. ++static struct sk_buff *
  30563. ++ath10k_wmi_tlv_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
  30564. ++ u32 param_id, u32 param_value)
  30565. ++{
  30566. ++ struct wmi_vdev_set_param_cmd *cmd;
  30567. ++ struct wmi_tlv *tlv;
  30568. ++ struct sk_buff *skb;
  30569. ++
  30570. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  30571. ++ if (!skb)
  30572. ++ return ERR_PTR(-ENOMEM);
  30573. ++
  30574. ++ tlv = (void *)skb->data;
  30575. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_PARAM_CMD);
  30576. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  30577. ++ cmd = (void *)tlv->value;
  30578. ++ cmd->vdev_id = __cpu_to_le32(vdev_id);
  30579. ++ cmd->param_id = __cpu_to_le32(param_id);
  30580. ++ cmd->param_value = __cpu_to_le32(param_value);
  30581. ++
  30582. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev set param\n");
  30583. ++ return skb;
  30584. ++}
  30585. ++
  30586. ++static struct sk_buff *
  30587. ++ath10k_wmi_tlv_op_gen_vdev_install_key(struct ath10k *ar,
  30588. ++ const struct wmi_vdev_install_key_arg *arg)
  30589. ++{
  30590. ++ struct wmi_vdev_install_key_cmd *cmd;
  30591. ++ struct wmi_tlv *tlv;
  30592. ++ struct sk_buff *skb;
  30593. ++ size_t len;
  30594. ++ void *ptr;
  30595. ++
  30596. ++ if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
  30597. ++ return ERR_PTR(-EINVAL);
  30598. ++ if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
  30599. ++ return ERR_PTR(-EINVAL);
  30600. ++
  30601. ++ len = sizeof(*tlv) + sizeof(*cmd) +
  30602. ++ sizeof(*tlv) + roundup(arg->key_len, sizeof(__le32));
  30603. ++ skb = ath10k_wmi_alloc_skb(ar, len);
  30604. ++ if (!skb)
  30605. ++ return ERR_PTR(-ENOMEM);
  30606. ++
  30607. ++ ptr = (void *)skb->data;
  30608. ++ tlv = ptr;
  30609. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_CMD);
  30610. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  30611. ++ cmd = (void *)tlv->value;
  30612. ++ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
  30613. ++ cmd->key_idx = __cpu_to_le32(arg->key_idx);
  30614. ++ cmd->key_flags = __cpu_to_le32(arg->key_flags);
  30615. ++ cmd->key_cipher = __cpu_to_le32(arg->key_cipher);
  30616. ++ cmd->key_len = __cpu_to_le32(arg->key_len);
  30617. ++ cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
  30618. ++ cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);
  30619. ++
  30620. ++ if (arg->macaddr)
  30621. ++ ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
  30622. ++
  30623. ++ ptr += sizeof(*tlv);
  30624. ++ ptr += sizeof(*cmd);
  30625. ++
  30626. ++ tlv = ptr;
  30627. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
  30628. ++ tlv->len = __cpu_to_le16(roundup(arg->key_len, sizeof(__le32)));
  30629. ++ if (arg->key_data)
  30630. ++ memcpy(tlv->value, arg->key_data, arg->key_len);
  30631. ++
  30632. ++ ptr += sizeof(*tlv);
  30633. ++ ptr += roundup(arg->key_len, sizeof(__le32));
  30634. ++
  30635. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev install key\n");
  30636. ++ return skb;
  30637. ++}
  30638. ++
  30639. ++static void *ath10k_wmi_tlv_put_uapsd_ac(struct ath10k *ar, void *ptr,
  30640. ++ const struct wmi_sta_uapsd_auto_trig_arg *arg)
  30641. ++{
  30642. ++ struct wmi_sta_uapsd_auto_trig_param *ac;
  30643. ++ struct wmi_tlv *tlv;
  30644. ++
  30645. ++ tlv = ptr;
  30646. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_PARAM);
  30647. ++ tlv->len = __cpu_to_le16(sizeof(*ac));
  30648. ++ ac = (void *)tlv->value;
  30649. ++
  30650. ++ ac->wmm_ac = __cpu_to_le32(arg->wmm_ac);
  30651. ++ ac->user_priority = __cpu_to_le32(arg->user_priority);
  30652. ++ ac->service_interval = __cpu_to_le32(arg->service_interval);
  30653. ++ ac->suspend_interval = __cpu_to_le32(arg->suspend_interval);
  30654. ++ ac->delay_interval = __cpu_to_le32(arg->delay_interval);
  30655. ++
  30656. ++ ath10k_dbg(ar, ATH10K_DBG_WMI,
  30657. ++ "wmi tlv vdev sta uapsd auto trigger ac %d prio %d svc int %d susp int %d delay int %d\n",
  30658. ++ ac->wmm_ac, ac->user_priority, ac->service_interval,
  30659. ++ ac->suspend_interval, ac->delay_interval);
  30660. ++
  30661. ++ return ptr + sizeof(*tlv) + sizeof(*ac);
  30662. ++}
  30663. ++
  30664. ++static struct sk_buff *
  30665. ++ath10k_wmi_tlv_op_gen_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
  30666. ++ const u8 peer_addr[ETH_ALEN],
  30667. ++ const struct wmi_sta_uapsd_auto_trig_arg *args,
  30668. ++ u32 num_ac)
  30669. ++{
  30670. ++ struct wmi_sta_uapsd_auto_trig_cmd_fixed_param *cmd;
  30671. ++ struct wmi_sta_uapsd_auto_trig_param *ac;
  30672. ++ struct wmi_tlv *tlv;
  30673. ++ struct sk_buff *skb;
  30674. ++ size_t len;
  30675. ++ size_t ac_tlv_len;
  30676. ++ void *ptr;
  30677. ++ int i;
  30678. ++
  30679. ++ ac_tlv_len = num_ac * (sizeof(*tlv) + sizeof(*ac));
  30680. ++ len = sizeof(*tlv) + sizeof(*cmd) +
  30681. ++ sizeof(*tlv) + ac_tlv_len;
  30682. ++ skb = ath10k_wmi_alloc_skb(ar, len);
  30683. ++ if (!skb)
  30684. ++ return ERR_PTR(-ENOMEM);
  30685. ++
  30686. ++ ptr = (void *)skb->data;
  30687. ++ tlv = ptr;
  30688. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_CMD);
  30689. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  30690. ++ cmd = (void *)tlv->value;
  30691. ++ cmd->vdev_id = __cpu_to_le32(vdev_id);
  30692. ++ cmd->num_ac = __cpu_to_le32(num_ac);
  30693. ++ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
  30694. ++
  30695. ++ ptr += sizeof(*tlv);
  30696. ++ ptr += sizeof(*cmd);
  30697. ++
  30698. ++ tlv = ptr;
  30699. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
  30700. ++ tlv->len = __cpu_to_le16(ac_tlv_len);
  30701. ++ ac = (void *)tlv->value;
  30702. ++
  30703. ++ ptr += sizeof(*tlv);
  30704. ++ for (i = 0; i < num_ac; i++)
  30705. ++ ptr = ath10k_wmi_tlv_put_uapsd_ac(ar, ptr, &args[i]);
  30706. ++
  30707. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev sta uapsd auto trigger\n");
  30708. ++ return skb;
  30709. ++}
  30710. ++
  30711. ++static void *ath10k_wmi_tlv_put_wmm(void *ptr,
  30712. ++ const struct wmi_wmm_params_arg *arg)
  30713. ++{
  30714. ++ struct wmi_wmm_params *wmm;
  30715. ++ struct wmi_tlv *tlv;
  30716. ++
  30717. ++ tlv = ptr;
  30718. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WMM_PARAMS);
  30719. ++ tlv->len = __cpu_to_le16(sizeof(*wmm));
  30720. ++ wmm = (void *)tlv->value;
  30721. ++ ath10k_wmi_set_wmm_param(wmm, arg);
  30722. ++
  30723. ++ return ptr + sizeof(*tlv) + sizeof(*wmm);
  30724. ++}
  30725. ++
  30726. ++static struct sk_buff *
  30727. ++ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
  30728. ++ const struct wmi_wmm_params_all_arg *arg)
  30729. ++{
  30730. ++ struct wmi_tlv_vdev_set_wmm_cmd *cmd;
  30731. ++ struct wmi_tlv *tlv;
  30732. ++ struct sk_buff *skb;
  30733. ++ size_t len;
  30734. ++ void *ptr;
  30735. ++
  30736. ++ len = sizeof(*tlv) + sizeof(*cmd);
  30737. ++ skb = ath10k_wmi_alloc_skb(ar, len);
  30738. ++ if (!skb)
  30739. ++ return ERR_PTR(-ENOMEM);
  30740. ++
  30741. ++ ptr = (void *)skb->data;
  30742. ++ tlv = ptr;
  30743. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_WMM_PARAMS_CMD);
  30744. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  30745. ++ cmd = (void *)tlv->value;
  30746. ++ cmd->vdev_id = __cpu_to_le32(vdev_id);
  30747. ++
  30748. ++ ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[0].params, &arg->ac_be);
  30749. ++ ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[1].params, &arg->ac_bk);
  30750. ++ ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[2].params, &arg->ac_vi);
  30751. ++ ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[3].params, &arg->ac_vo);
  30752. ++
  30753. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev wmm conf\n");
  30754. ++ return skb;
  30755. ++}
  30756. ++
  30757. ++static struct sk_buff *
  30758. ++ath10k_wmi_tlv_op_gen_sta_keepalive(struct ath10k *ar,
  30759. ++ const struct wmi_sta_keepalive_arg *arg)
  30760. ++{
  30761. ++ struct wmi_tlv_sta_keepalive_cmd *cmd;
  30762. ++ struct wmi_sta_keepalive_arp_resp *arp;
  30763. ++ struct sk_buff *skb;
  30764. ++ struct wmi_tlv *tlv;
  30765. ++ void *ptr;
  30766. ++ size_t len;
  30767. ++
  30768. ++ len = sizeof(*tlv) + sizeof(*cmd) +
  30769. ++ sizeof(*tlv) + sizeof(*arp);
  30770. ++ skb = ath10k_wmi_alloc_skb(ar, len);
  30771. ++ if (!skb)
  30772. ++ return ERR_PTR(-ENOMEM);
  30773. ++
  30774. ++ ptr = (void *)skb->data;
  30775. ++ tlv = ptr;
  30776. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALIVE_CMD);
  30777. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  30778. ++ cmd = (void *)tlv->value;
  30779. ++ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
  30780. ++ cmd->enabled = __cpu_to_le32(arg->enabled);
  30781. ++ cmd->method = __cpu_to_le32(arg->method);
  30782. ++ cmd->interval = __cpu_to_le32(arg->interval);
  30783. ++
  30784. ++ ptr += sizeof(*tlv);
  30785. ++ ptr += sizeof(*cmd);
  30786. ++
  30787. ++ tlv = ptr;
  30788. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALVE_ARP_RESPONSE);
  30789. ++ tlv->len = __cpu_to_le16(sizeof(*arp));
  30790. ++ arp = (void *)tlv->value;
  30791. ++
  30792. ++ arp->src_ip4_addr = arg->src_ip4_addr;
  30793. ++ arp->dest_ip4_addr = arg->dest_ip4_addr;
  30794. ++ ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
  30795. ++
  30796. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv sta keepalive vdev %d enabled %d method %d inverval %d\n",
  30797. ++ arg->vdev_id, arg->enabled, arg->method, arg->interval);
  30798. ++ return skb;
  30799. ++}
  30800. ++
  30801. ++static struct sk_buff *
  30802. ++ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
  30803. ++ const u8 peer_addr[ETH_ALEN])
  30804. ++{
  30805. ++ struct wmi_tlv_peer_create_cmd *cmd;
  30806. ++ struct wmi_tlv *tlv;
  30807. ++ struct sk_buff *skb;
  30808. ++
  30809. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  30810. ++ if (!skb)
  30811. ++ return ERR_PTR(-ENOMEM);
  30812. ++
  30813. ++ tlv = (void *)skb->data;
  30814. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_CREATE_CMD);
  30815. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  30816. ++ cmd = (void *)tlv->value;
  30817. ++ cmd->vdev_id = __cpu_to_le32(vdev_id);
  30818. ++ cmd->peer_type = __cpu_to_le32(WMI_TLV_PEER_TYPE_DEFAULT); /* FIXME */
  30819. ++ ether_addr_copy(cmd->peer_addr.addr, peer_addr);
  30820. ++
  30821. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer create\n");
  30822. ++ return skb;
  30823. ++}
  30824. ++
  30825. ++static struct sk_buff *
  30826. ++ath10k_wmi_tlv_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
  30827. ++ const u8 peer_addr[ETH_ALEN])
  30828. ++{
  30829. ++ struct wmi_peer_delete_cmd *cmd;
  30830. ++ struct wmi_tlv *tlv;
  30831. ++ struct sk_buff *skb;
  30832. ++
  30833. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  30834. ++ if (!skb)
  30835. ++ return ERR_PTR(-ENOMEM);
  30836. ++
  30837. ++ tlv = (void *)skb->data;
  30838. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_DELETE_CMD);
  30839. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  30840. ++ cmd = (void *)tlv->value;
  30841. ++ cmd->vdev_id = __cpu_to_le32(vdev_id);
  30842. ++ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
  30843. ++
  30844. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer delete\n");
  30845. ++ return skb;
  30846. ++}
  30847. ++
  30848. ++static struct sk_buff *
  30849. ++ath10k_wmi_tlv_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
  30850. ++ const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
  30851. ++{
  30852. ++ struct wmi_peer_flush_tids_cmd *cmd;
  30853. ++ struct wmi_tlv *tlv;
  30854. ++ struct sk_buff *skb;
  30855. ++
  30856. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  30857. ++ if (!skb)
  30858. ++ return ERR_PTR(-ENOMEM);
  30859. ++
  30860. ++ tlv = (void *)skb->data;
  30861. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_FLUSH_TIDS_CMD);
  30862. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  30863. ++ cmd = (void *)tlv->value;
  30864. ++ cmd->vdev_id = __cpu_to_le32(vdev_id);
  30865. ++ cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
  30866. ++ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
  30867. ++
  30868. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer flush\n");
  30869. ++ return skb;
  30870. ++}
  30871. ++
  30872. ++static struct sk_buff *
  30873. ++ath10k_wmi_tlv_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
  30874. ++ const u8 *peer_addr,
  30875. ++ enum wmi_peer_param param_id,
  30876. ++ u32 param_value)
  30877. ++{
  30878. ++ struct wmi_peer_set_param_cmd *cmd;
  30879. ++ struct wmi_tlv *tlv;
  30880. ++ struct sk_buff *skb;
  30881. ++
  30882. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  30883. ++ if (!skb)
  30884. ++ return ERR_PTR(-ENOMEM);
  30885. ++
  30886. ++ tlv = (void *)skb->data;
  30887. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_SET_PARAM_CMD);
  30888. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  30889. ++ cmd = (void *)tlv->value;
  30890. ++ cmd->vdev_id = __cpu_to_le32(vdev_id);
  30891. ++ cmd->param_id = __cpu_to_le32(param_id);
  30892. ++ cmd->param_value = __cpu_to_le32(param_value);
  30893. ++ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
  30894. ++
  30895. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer set param\n");
  30896. ++ return skb;
  30897. ++}
  30898. ++
  30899. ++static struct sk_buff *
  30900. ++ath10k_wmi_tlv_op_gen_peer_assoc(struct ath10k *ar,
  30901. ++ const struct wmi_peer_assoc_complete_arg *arg)
  30902. ++{
  30903. ++ struct wmi_tlv_peer_assoc_cmd *cmd;
  30904. ++ struct wmi_vht_rate_set *vht_rate;
  30905. ++ struct wmi_tlv *tlv;
  30906. ++ struct sk_buff *skb;
  30907. ++ size_t len, legacy_rate_len, ht_rate_len;
  30908. ++ void *ptr;
  30909. ++
  30910. ++ if (arg->peer_mpdu_density > 16)
  30911. ++ return ERR_PTR(-EINVAL);
  30912. ++ if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
  30913. ++ return ERR_PTR(-EINVAL);
  30914. ++ if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
  30915. ++ return ERR_PTR(-EINVAL);
  30916. ++
  30917. ++ legacy_rate_len = roundup(arg->peer_legacy_rates.num_rates,
  30918. ++ sizeof(__le32));
  30919. ++ ht_rate_len = roundup(arg->peer_ht_rates.num_rates, sizeof(__le32));
  30920. ++ len = (sizeof(*tlv) + sizeof(*cmd)) +
  30921. ++ (sizeof(*tlv) + legacy_rate_len) +
  30922. ++ (sizeof(*tlv) + ht_rate_len) +
  30923. ++ (sizeof(*tlv) + sizeof(*vht_rate));
  30924. ++ skb = ath10k_wmi_alloc_skb(ar, len);
  30925. ++ if (!skb)
  30926. ++ return ERR_PTR(-ENOMEM);
  30927. ++
  30928. ++ ptr = (void *)skb->data;
  30929. ++ tlv = ptr;
  30930. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_ASSOC_COMPLETE_CMD);
  30931. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  30932. ++ cmd = (void *)tlv->value;
  30933. ++
  30934. ++ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
  30935. ++ cmd->new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
  30936. ++ cmd->assoc_id = __cpu_to_le32(arg->peer_aid);
  30937. ++ cmd->flags = __cpu_to_le32(arg->peer_flags);
  30938. ++ cmd->caps = __cpu_to_le32(arg->peer_caps);
  30939. ++ cmd->listen_intval = __cpu_to_le32(arg->peer_listen_intval);
  30940. ++ cmd->ht_caps = __cpu_to_le32(arg->peer_ht_caps);
  30941. ++ cmd->max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
  30942. ++ cmd->mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
  30943. ++ cmd->rate_caps = __cpu_to_le32(arg->peer_rate_caps);
  30944. ++ cmd->nss = __cpu_to_le32(arg->peer_num_spatial_streams);
  30945. ++ cmd->vht_caps = __cpu_to_le32(arg->peer_vht_caps);
  30946. ++ cmd->phy_mode = __cpu_to_le32(arg->peer_phymode);
  30947. ++ cmd->num_legacy_rates = __cpu_to_le32(arg->peer_legacy_rates.num_rates);
  30948. ++ cmd->num_ht_rates = __cpu_to_le32(arg->peer_ht_rates.num_rates);
  30949. ++ ether_addr_copy(cmd->mac_addr.addr, arg->addr);
  30950. ++
  30951. ++ ptr += sizeof(*tlv);
  30952. ++ ptr += sizeof(*cmd);
  30953. ++
  30954. ++ tlv = ptr;
  30955. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
  30956. ++ tlv->len = __cpu_to_le16(legacy_rate_len);
  30957. ++ memcpy(tlv->value, arg->peer_legacy_rates.rates,
  30958. ++ arg->peer_legacy_rates.num_rates);
  30959. ++
  30960. ++ ptr += sizeof(*tlv);
  30961. ++ ptr += legacy_rate_len;
  30962. ++
  30963. ++ tlv = ptr;
  30964. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
  30965. ++ tlv->len = __cpu_to_le16(ht_rate_len);
  30966. ++ memcpy(tlv->value, arg->peer_ht_rates.rates,
  30967. ++ arg->peer_ht_rates.num_rates);
  30968. ++
  30969. ++ ptr += sizeof(*tlv);
  30970. ++ ptr += ht_rate_len;
  30971. ++
  30972. ++ tlv = ptr;
  30973. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VHT_RATE_SET);
  30974. ++ tlv->len = __cpu_to_le16(sizeof(*vht_rate));
  30975. ++ vht_rate = (void *)tlv->value;
  30976. ++
  30977. ++ vht_rate->rx_max_rate = __cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
  30978. ++ vht_rate->rx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
  30979. ++ vht_rate->tx_max_rate = __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
  30980. ++ vht_rate->tx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
  30981. ++
  30982. ++ ptr += sizeof(*tlv);
  30983. ++ ptr += sizeof(*vht_rate);
  30984. ++
  30985. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer assoc\n");
  30986. ++ return skb;
  30987. ++}
  30988. ++
  30989. ++static struct sk_buff *
  30990. ++ath10k_wmi_tlv_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
  30991. ++ enum wmi_sta_ps_mode psmode)
  30992. ++{
  30993. ++ struct wmi_sta_powersave_mode_cmd *cmd;
  30994. ++ struct wmi_tlv *tlv;
  30995. ++ struct sk_buff *skb;
  30996. ++
  30997. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  30998. ++ if (!skb)
  30999. ++ return ERR_PTR(-ENOMEM);
  31000. ++
  31001. ++ tlv = (void *)skb->data;
  31002. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_MODE_CMD);
  31003. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  31004. ++ cmd = (void *)tlv->value;
  31005. ++ cmd->vdev_id = __cpu_to_le32(vdev_id);
  31006. ++ cmd->sta_ps_mode = __cpu_to_le32(psmode);
  31007. ++
  31008. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set psmode\n");
  31009. ++ return skb;
  31010. ++}
  31011. ++
  31012. ++static struct sk_buff *
  31013. ++ath10k_wmi_tlv_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
  31014. ++ enum wmi_sta_powersave_param param_id,
  31015. ++ u32 param_value)
  31016. ++{
  31017. ++ struct wmi_sta_powersave_param_cmd *cmd;
  31018. ++ struct wmi_tlv *tlv;
  31019. ++ struct sk_buff *skb;
  31020. ++
  31021. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  31022. ++ if (!skb)
  31023. ++ return ERR_PTR(-ENOMEM);
  31024. ++
  31025. ++ tlv = (void *)skb->data;
  31026. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_PARAM_CMD);
  31027. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  31028. ++ cmd = (void *)tlv->value;
  31029. ++ cmd->vdev_id = __cpu_to_le32(vdev_id);
  31030. ++ cmd->param_id = __cpu_to_le32(param_id);
  31031. ++ cmd->param_value = __cpu_to_le32(param_value);
  31032. ++
  31033. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set sta ps\n");
  31034. ++ return skb;
  31035. ++}
  31036. ++
  31037. ++static struct sk_buff *
  31038. ++ath10k_wmi_tlv_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
  31039. ++ enum wmi_ap_ps_peer_param param_id, u32 value)
  31040. ++{
  31041. ++ struct wmi_ap_ps_peer_cmd *cmd;
  31042. ++ struct wmi_tlv *tlv;
  31043. ++ struct sk_buff *skb;
  31044. ++
  31045. ++ if (!mac)
  31046. ++ return ERR_PTR(-EINVAL);
  31047. ++
  31048. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  31049. ++ if (!skb)
  31050. ++ return ERR_PTR(-ENOMEM);
  31051. ++
  31052. ++ tlv = (void *)skb->data;
  31053. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_AP_PS_PEER_CMD);
  31054. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  31055. ++ cmd = (void *)tlv->value;
  31056. ++ cmd->vdev_id = __cpu_to_le32(vdev_id);
  31057. ++ cmd->param_id = __cpu_to_le32(param_id);
  31058. ++ cmd->param_value = __cpu_to_le32(value);
  31059. ++ ether_addr_copy(cmd->peer_macaddr.addr, mac);
  31060. ++
  31061. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv ap ps param\n");
  31062. ++ return skb;
  31063. ++}
  31064. ++
  31065. ++static struct sk_buff *
  31066. ++ath10k_wmi_tlv_op_gen_scan_chan_list(struct ath10k *ar,
  31067. ++ const struct wmi_scan_chan_list_arg *arg)
  31068. ++{
  31069. ++ struct wmi_tlv_scan_chan_list_cmd *cmd;
  31070. ++ struct wmi_channel *ci;
  31071. ++ struct wmi_channel_arg *ch;
  31072. ++ struct wmi_tlv *tlv;
  31073. ++ struct sk_buff *skb;
  31074. ++ size_t chans_len, len;
  31075. ++ int i;
  31076. ++ void *ptr, *chans;
  31077. ++
  31078. ++ chans_len = arg->n_channels * (sizeof(*tlv) + sizeof(*ci));
  31079. ++ len = (sizeof(*tlv) + sizeof(*cmd)) +
  31080. ++ (sizeof(*tlv) + chans_len);
  31081. ++
  31082. ++ skb = ath10k_wmi_alloc_skb(ar, len);
  31083. ++ if (!skb)
  31084. ++ return ERR_PTR(-ENOMEM);
  31085. ++
  31086. ++ ptr = (void *)skb->data;
  31087. ++ tlv = ptr;
  31088. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_CHAN_LIST_CMD);
  31089. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  31090. ++ cmd = (void *)tlv->value;
  31091. ++ cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
  31092. ++
  31093. ++ ptr += sizeof(*tlv);
  31094. ++ ptr += sizeof(*cmd);
  31095. ++
  31096. ++ tlv = ptr;
  31097. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
  31098. ++ tlv->len = __cpu_to_le16(chans_len);
  31099. ++ chans = (void *)tlv->value;
  31100. ++
  31101. ++ for (i = 0; i < arg->n_channels; i++) {
  31102. ++ ch = &arg->channels[i];
  31103. ++
  31104. ++ tlv = chans;
  31105. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
  31106. ++ tlv->len = __cpu_to_le16(sizeof(*ci));
  31107. ++ ci = (void *)tlv->value;
  31108. ++
  31109. ++ ath10k_wmi_put_wmi_channel(ci, ch);
  31110. ++
  31111. ++ chans += sizeof(*tlv);
  31112. ++ chans += sizeof(*ci);
  31113. ++ }
  31114. ++
  31115. ++ ptr += sizeof(*tlv);
  31116. ++ ptr += chans_len;
  31117. ++
  31118. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan chan list\n");
  31119. ++ return skb;
  31120. ++}
  31121. ++
  31122. ++static struct sk_buff *
  31123. ++ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id,
  31124. ++ const void *bcn, size_t bcn_len,
  31125. ++ u32 bcn_paddr, bool dtim_zero,
  31126. ++ bool deliver_cab)
  31127. ++
  31128. ++{
  31129. ++ struct wmi_bcn_tx_ref_cmd *cmd;
  31130. ++ struct wmi_tlv *tlv;
  31131. ++ struct sk_buff *skb;
  31132. ++ struct ieee80211_hdr *hdr;
  31133. ++ u16 fc;
  31134. ++
  31135. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  31136. ++ if (!skb)
  31137. ++ return ERR_PTR(-ENOMEM);
  31138. ++
  31139. ++ hdr = (struct ieee80211_hdr *)bcn;
  31140. ++ fc = le16_to_cpu(hdr->frame_control);
  31141. ++
  31142. ++ tlv = (void *)skb->data;
  31143. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_SEND_FROM_HOST_CMD);
  31144. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  31145. ++ cmd = (void *)tlv->value;
  31146. ++ cmd->vdev_id = __cpu_to_le32(vdev_id);
  31147. ++ cmd->data_len = __cpu_to_le32(bcn_len);
  31148. ++ cmd->data_ptr = __cpu_to_le32(bcn_paddr);
  31149. ++ cmd->msdu_id = 0;
  31150. ++ cmd->frame_control = __cpu_to_le32(fc);
  31151. ++ cmd->flags = 0;
  31152. ++
  31153. ++ if (dtim_zero)
  31154. ++ cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
  31155. ++
  31156. ++ if (deliver_cab)
  31157. ++ cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
  31158. ++
  31159. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv beacon dma\n");
  31160. ++ return skb;
  31161. ++}
  31162. ++
  31163. ++static struct sk_buff *
  31164. ++ath10k_wmi_tlv_op_gen_pdev_set_wmm(struct ath10k *ar,
  31165. ++ const struct wmi_wmm_params_all_arg *arg)
  31166. ++{
  31167. ++ struct wmi_tlv_pdev_set_wmm_cmd *cmd;
  31168. ++ struct wmi_wmm_params *wmm;
  31169. ++ struct wmi_tlv *tlv;
  31170. ++ struct sk_buff *skb;
  31171. ++ size_t len;
  31172. ++ void *ptr;
  31173. ++
  31174. ++ len = (sizeof(*tlv) + sizeof(*cmd)) +
  31175. ++ (4 * (sizeof(*tlv) + sizeof(*wmm)));
  31176. ++ skb = ath10k_wmi_alloc_skb(ar, len);
  31177. ++ if (!skb)
  31178. ++ return ERR_PTR(-ENOMEM);
  31179. ++
  31180. ++ ptr = (void *)skb->data;
  31181. ++
  31182. ++ tlv = ptr;
  31183. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_WMM_PARAMS_CMD);
  31184. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  31185. ++ cmd = (void *)tlv->value;
  31186. ++
  31187. ++ /* nothing to set here */
  31188. ++
  31189. ++ ptr += sizeof(*tlv);
  31190. ++ ptr += sizeof(*cmd);
  31191. ++
  31192. ++ ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_be);
  31193. ++ ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_bk);
  31194. ++ ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vi);
  31195. ++ ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vo);
  31196. ++
  31197. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set wmm\n");
  31198. ++ return skb;
  31199. ++}
  31200. ++
  31201. ++static struct sk_buff *
  31202. ++ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
  31203. ++{
  31204. ++ struct wmi_request_stats_cmd *cmd;
  31205. ++ struct wmi_tlv *tlv;
  31206. ++ struct sk_buff *skb;
  31207. ++
  31208. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  31209. ++ if (!skb)
  31210. ++ return ERR_PTR(-ENOMEM);
  31211. ++
  31212. ++ tlv = (void *)skb->data;
  31213. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD);
  31214. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  31215. ++ cmd = (void *)tlv->value;
  31216. ++ cmd->stats_id = __cpu_to_le32(stats_mask);
  31217. ++
  31218. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request stats\n");
  31219. ++ return skb;
  31220. ++}
  31221. ++
  31222. ++static struct sk_buff *
  31223. ++ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
  31224. ++ enum wmi_force_fw_hang_type type,
  31225. ++ u32 delay_ms)
  31226. ++{
  31227. ++ struct wmi_force_fw_hang_cmd *cmd;
  31228. ++ struct wmi_tlv *tlv;
  31229. ++ struct sk_buff *skb;
  31230. ++
  31231. ++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  31232. ++ if (!skb)
  31233. ++ return ERR_PTR(-ENOMEM);
  31234. ++
  31235. ++ tlv = (void *)skb->data;
  31236. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_FORCE_FW_HANG_CMD);
  31237. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  31238. ++ cmd = (void *)tlv->value;
  31239. ++ cmd->type = __cpu_to_le32(type);
  31240. ++ cmd->delay_ms = __cpu_to_le32(delay_ms);
  31241. ++
  31242. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv force fw hang\n");
  31243. ++ return skb;
  31244. ++}
  31245. ++
  31246. ++static struct sk_buff *
  31247. ++ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
  31248. ++ u32 log_level) {
  31249. ++ struct wmi_tlv_dbglog_cmd *cmd;
  31250. ++ struct wmi_tlv *tlv;
  31251. ++ struct sk_buff *skb;
  31252. ++ size_t len, bmap_len;
  31253. ++ u32 value;
  31254. ++ void *ptr;
  31255. ++
  31256. ++ if (module_enable) {
  31257. ++ value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
  31258. ++ module_enable,
  31259. ++ WMI_TLV_DBGLOG_LOG_LEVEL_VERBOSE);
  31260. ++ } else {
  31261. ++ value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
  31262. ++ WMI_TLV_DBGLOG_ALL_MODULES,
  31263. ++ WMI_TLV_DBGLOG_LOG_LEVEL_WARN);
  31264. ++ }
  31265. ++
  31266. ++ bmap_len = 0;
  31267. ++ len = sizeof(*tlv) + sizeof(*cmd) + sizeof(*tlv) + bmap_len;
  31268. ++ skb = ath10k_wmi_alloc_skb(ar, len);
  31269. ++ if (!skb)
  31270. ++ return ERR_PTR(-ENOMEM);
  31271. ++
  31272. ++ ptr = (void *)skb->data;
  31273. ++
  31274. ++ tlv = ptr;
  31275. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_DEBUG_LOG_CONFIG_CMD);
  31276. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  31277. ++ cmd = (void *)tlv->value;
  31278. ++ cmd->param = __cpu_to_le32(WMI_TLV_DBGLOG_PARAM_LOG_LEVEL);
  31279. ++ cmd->value = __cpu_to_le32(value);
  31280. ++
  31281. ++ ptr += sizeof(*tlv);
  31282. ++ ptr += sizeof(*cmd);
  31283. ++
  31284. ++ tlv = ptr;
  31285. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
  31286. ++ tlv->len = __cpu_to_le16(bmap_len);
  31287. ++
  31288. ++ /* nothing to do here */
  31289. ++
  31290. ++ ptr += sizeof(*tlv);
  31291. ++ ptr += sizeof(bmap_len);
  31292. ++
  31293. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv dbglog value 0x%08x\n", value);
  31294. ++ return skb;
  31295. ++}
  31296. ++
  31297. ++static struct sk_buff *
  31298. ++ath10k_wmi_tlv_op_gen_pktlog_enable(struct ath10k *ar, u32 filter)
  31299. ++{
  31300. ++ struct wmi_tlv_pktlog_enable *cmd;
  31301. ++ struct wmi_tlv *tlv;
  31302. ++ struct sk_buff *skb;
  31303. ++ void *ptr;
  31304. ++ size_t len;
  31305. ++
  31306. ++ len = sizeof(*tlv) + sizeof(*cmd);
  31307. ++ skb = ath10k_wmi_alloc_skb(ar, len);
  31308. ++ if (!skb)
  31309. ++ return ERR_PTR(-ENOMEM);
  31310. ++
  31311. ++ ptr = (void *)skb->data;
  31312. ++ tlv = ptr;
  31313. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_ENABLE_CMD);
  31314. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  31315. ++ cmd = (void *)tlv->value;
  31316. ++ cmd->filter = __cpu_to_le32(filter);
  31317. ++
  31318. ++ ptr += sizeof(*tlv);
  31319. ++ ptr += sizeof(*cmd);
  31320. ++
  31321. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog enable filter 0x%08x\n",
  31322. ++ filter);
  31323. ++ return skb;
  31324. ++}
  31325. ++
  31326. ++static struct sk_buff *
  31327. ++ath10k_wmi_tlv_op_gen_pktlog_disable(struct ath10k *ar)
  31328. ++{
  31329. ++ struct wmi_tlv_pktlog_disable *cmd;
  31330. ++ struct wmi_tlv *tlv;
  31331. ++ struct sk_buff *skb;
  31332. ++ void *ptr;
  31333. ++ size_t len;
  31334. ++
  31335. ++ len = sizeof(*tlv) + sizeof(*cmd);
  31336. ++ skb = ath10k_wmi_alloc_skb(ar, len);
  31337. ++ if (!skb)
  31338. ++ return ERR_PTR(-ENOMEM);
  31339. ++
  31340. ++ ptr = (void *)skb->data;
  31341. ++ tlv = ptr;
  31342. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_DISABLE_CMD);
  31343. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  31344. ++ cmd = (void *)tlv->value;
  31345. ++
  31346. ++ ptr += sizeof(*tlv);
  31347. ++ ptr += sizeof(*cmd);
  31348. ++
  31349. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog disable\n");
  31350. ++ return skb;
  31351. ++}
  31352. ++
  31353. ++static struct sk_buff *
  31354. ++ath10k_wmi_tlv_op_gen_bcn_tmpl(struct ath10k *ar, u32 vdev_id,
  31355. ++ u32 tim_ie_offset, struct sk_buff *bcn,
  31356. ++ u32 prb_caps, u32 prb_erp, void *prb_ies,
  31357. ++ size_t prb_ies_len)
  31358. ++{
  31359. ++ struct wmi_tlv_bcn_tmpl_cmd *cmd;
  31360. ++ struct wmi_tlv_bcn_prb_info *info;
  31361. ++ struct wmi_tlv *tlv;
  31362. ++ struct sk_buff *skb;
  31363. ++ void *ptr;
  31364. ++ size_t len;
  31365. ++
  31366. ++ if (WARN_ON(prb_ies_len > 0 && !prb_ies))
  31367. ++ return ERR_PTR(-EINVAL);
  31368. ++
  31369. ++ len = sizeof(*tlv) + sizeof(*cmd) +
  31370. ++ sizeof(*tlv) + sizeof(*info) + prb_ies_len +
  31371. ++ sizeof(*tlv) + roundup(bcn->len, 4);
  31372. ++ skb = ath10k_wmi_alloc_skb(ar, len);
  31373. ++ if (!skb)
  31374. ++ return ERR_PTR(-ENOMEM);
  31375. ++
  31376. ++ ptr = (void *)skb->data;
  31377. ++ tlv = ptr;
  31378. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_TMPL_CMD);
  31379. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  31380. ++ cmd = (void *)tlv->value;
  31381. ++ cmd->vdev_id = __cpu_to_le32(vdev_id);
  31382. ++ cmd->tim_ie_offset = __cpu_to_le32(tim_ie_offset);
  31383. ++ cmd->buf_len = __cpu_to_le32(bcn->len);
  31384. ++
  31385. ++ ptr += sizeof(*tlv);
  31386. ++ ptr += sizeof(*cmd);
  31387. ++
  31388. ++ /* FIXME: prb_ies_len should be probably aligned to 4byte boundary but
  31389. ++ * then it is then impossible to pass original ie len.
  31390. ++ * This chunk is not used yet so if setting probe resp template yields
  31391. ++ * problems with beaconing or crashes firmware look here.
  31392. ++ */
  31393. ++ tlv = ptr;
  31394. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
  31395. ++ tlv->len = __cpu_to_le16(sizeof(*info) + prb_ies_len);
  31396. ++ info = (void *)tlv->value;
  31397. ++ info->caps = __cpu_to_le32(prb_caps);
  31398. ++ info->erp = __cpu_to_le32(prb_erp);
  31399. ++ memcpy(info->ies, prb_ies, prb_ies_len);
  31400. ++
  31401. ++ ptr += sizeof(*tlv);
  31402. ++ ptr += sizeof(*info);
  31403. ++ ptr += prb_ies_len;
  31404. ++
  31405. ++ tlv = ptr;
  31406. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
  31407. ++ tlv->len = __cpu_to_le16(roundup(bcn->len, 4));
  31408. ++ memcpy(tlv->value, bcn->data, bcn->len);
  31409. ++
  31410. ++ /* FIXME: Adjust TSF? */
  31411. ++
  31412. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv bcn tmpl vdev_id %i\n",
  31413. ++ vdev_id);
  31414. ++ return skb;
  31415. ++}
  31416. ++
  31417. ++static struct sk_buff *
  31418. ++ath10k_wmi_tlv_op_gen_prb_tmpl(struct ath10k *ar, u32 vdev_id,
  31419. ++ struct sk_buff *prb)
  31420. ++{
  31421. ++ struct wmi_tlv_prb_tmpl_cmd *cmd;
  31422. ++ struct wmi_tlv_bcn_prb_info *info;
  31423. ++ struct wmi_tlv *tlv;
  31424. ++ struct sk_buff *skb;
  31425. ++ void *ptr;
  31426. ++ size_t len;
  31427. ++
  31428. ++ len = sizeof(*tlv) + sizeof(*cmd) +
  31429. ++ sizeof(*tlv) + sizeof(*info) +
  31430. ++ sizeof(*tlv) + roundup(prb->len, 4);
  31431. ++ skb = ath10k_wmi_alloc_skb(ar, len);
  31432. ++ if (!skb)
  31433. ++ return ERR_PTR(-ENOMEM);
  31434. ++
  31435. ++ ptr = (void *)skb->data;
  31436. ++ tlv = ptr;
  31437. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PRB_TMPL_CMD);
  31438. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  31439. ++ cmd = (void *)tlv->value;
  31440. ++ cmd->vdev_id = __cpu_to_le32(vdev_id);
  31441. ++ cmd->buf_len = __cpu_to_le32(prb->len);
  31442. ++
  31443. ++ ptr += sizeof(*tlv);
  31444. ++ ptr += sizeof(*cmd);
  31445. ++
  31446. ++ tlv = ptr;
  31447. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
  31448. ++ tlv->len = __cpu_to_le16(sizeof(*info));
  31449. ++ info = (void *)tlv->value;
  31450. ++ info->caps = 0;
  31451. ++ info->erp = 0;
  31452. ++
  31453. ++ ptr += sizeof(*tlv);
  31454. ++ ptr += sizeof(*info);
  31455. ++
  31456. ++ tlv = ptr;
  31457. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
  31458. ++ tlv->len = __cpu_to_le16(roundup(prb->len, 4));
  31459. ++ memcpy(tlv->value, prb->data, prb->len);
  31460. ++
  31461. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv prb tmpl vdev_id %i\n",
  31462. ++ vdev_id);
  31463. ++ return skb;
  31464. ++}
  31465. ++
  31466. ++static struct sk_buff *
  31467. ++ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id,
  31468. ++ const u8 *p2p_ie)
  31469. ++{
  31470. ++ struct wmi_tlv_p2p_go_bcn_ie *cmd;
  31471. ++ struct wmi_tlv *tlv;
  31472. ++ struct sk_buff *skb;
  31473. ++ void *ptr;
  31474. ++ size_t len;
  31475. ++
  31476. ++ len = sizeof(*tlv) + sizeof(*cmd) +
  31477. ++ sizeof(*tlv) + roundup(p2p_ie[1] + 2, 4);
  31478. ++ skb = ath10k_wmi_alloc_skb(ar, len);
  31479. ++ if (!skb)
  31480. ++ return ERR_PTR(-ENOMEM);
  31481. ++
  31482. ++ ptr = (void *)skb->data;
  31483. ++ tlv = ptr;
  31484. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_P2P_GO_SET_BEACON_IE);
  31485. ++ tlv->len = __cpu_to_le16(sizeof(*cmd));
  31486. ++ cmd = (void *)tlv->value;
  31487. ++ cmd->vdev_id = __cpu_to_le32(vdev_id);
  31488. ++ cmd->ie_len = __cpu_to_le32(p2p_ie[1] + 2);
  31489. ++
  31490. ++ ptr += sizeof(*tlv);
  31491. ++ ptr += sizeof(*cmd);
  31492. ++
  31493. ++ tlv = ptr;
  31494. ++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
  31495. ++ tlv->len = __cpu_to_le16(roundup(p2p_ie[1] + 2, 4));
  31496. ++ memcpy(tlv->value, p2p_ie, p2p_ie[1] + 2);
  31497. ++
  31498. ++ ptr += sizeof(*tlv);
  31499. ++ ptr += roundup(p2p_ie[1] + 2, 4);
  31500. ++
  31501. ++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv p2p go bcn ie for vdev %i\n",
  31502. ++ vdev_id);
  31503. ++ return skb;
  31504. ++}
  31505. ++
  31506. ++/****************/
  31507. ++/* TLV mappings */
  31508. ++/****************/
  31509. ++
  31510. ++static struct wmi_cmd_map wmi_tlv_cmd_map = {
  31511. ++ .init_cmdid = WMI_TLV_INIT_CMDID,
  31512. ++ .start_scan_cmdid = WMI_TLV_START_SCAN_CMDID,
  31513. ++ .stop_scan_cmdid = WMI_TLV_STOP_SCAN_CMDID,
  31514. ++ .scan_chan_list_cmdid = WMI_TLV_SCAN_CHAN_LIST_CMDID,
  31515. ++ .scan_sch_prio_tbl_cmdid = WMI_TLV_SCAN_SCH_PRIO_TBL_CMDID,
  31516. ++ .pdev_set_regdomain_cmdid = WMI_TLV_PDEV_SET_REGDOMAIN_CMDID,
  31517. ++ .pdev_set_channel_cmdid = WMI_TLV_PDEV_SET_CHANNEL_CMDID,
  31518. ++ .pdev_set_param_cmdid = WMI_TLV_PDEV_SET_PARAM_CMDID,
  31519. ++ .pdev_pktlog_enable_cmdid = WMI_TLV_PDEV_PKTLOG_ENABLE_CMDID,
  31520. ++ .pdev_pktlog_disable_cmdid = WMI_TLV_PDEV_PKTLOG_DISABLE_CMDID,
  31521. ++ .pdev_set_wmm_params_cmdid = WMI_TLV_PDEV_SET_WMM_PARAMS_CMDID,
  31522. ++ .pdev_set_ht_cap_ie_cmdid = WMI_TLV_PDEV_SET_HT_CAP_IE_CMDID,
  31523. ++ .pdev_set_vht_cap_ie_cmdid = WMI_TLV_PDEV_SET_VHT_CAP_IE_CMDID,
  31524. ++ .pdev_set_dscp_tid_map_cmdid = WMI_TLV_PDEV_SET_DSCP_TID_MAP_CMDID,
  31525. ++ .pdev_set_quiet_mode_cmdid = WMI_TLV_PDEV_SET_QUIET_MODE_CMDID,
  31526. ++ .pdev_green_ap_ps_enable_cmdid = WMI_TLV_PDEV_GREEN_AP_PS_ENABLE_CMDID,
  31527. ++ .pdev_get_tpc_config_cmdid = WMI_TLV_PDEV_GET_TPC_CONFIG_CMDID,
  31528. ++ .pdev_set_base_macaddr_cmdid = WMI_TLV_PDEV_SET_BASE_MACADDR_CMDID,
  31529. ++ .vdev_create_cmdid = WMI_TLV_VDEV_CREATE_CMDID,
  31530. ++ .vdev_delete_cmdid = WMI_TLV_VDEV_DELETE_CMDID,
  31531. ++ .vdev_start_request_cmdid = WMI_TLV_VDEV_START_REQUEST_CMDID,
  31532. ++ .vdev_restart_request_cmdid = WMI_TLV_VDEV_RESTART_REQUEST_CMDID,
  31533. ++ .vdev_up_cmdid = WMI_TLV_VDEV_UP_CMDID,
  31534. ++ .vdev_stop_cmdid = WMI_TLV_VDEV_STOP_CMDID,
  31535. ++ .vdev_down_cmdid = WMI_TLV_VDEV_DOWN_CMDID,
  31536. ++ .vdev_set_param_cmdid = WMI_TLV_VDEV_SET_PARAM_CMDID,
  31537. ++ .vdev_install_key_cmdid = WMI_TLV_VDEV_INSTALL_KEY_CMDID,
  31538. ++ .peer_create_cmdid = WMI_TLV_PEER_CREATE_CMDID,
  31539. ++ .peer_delete_cmdid = WMI_TLV_PEER_DELETE_CMDID,
  31540. ++ .peer_flush_tids_cmdid = WMI_TLV_PEER_FLUSH_TIDS_CMDID,
  31541. ++ .peer_set_param_cmdid = WMI_TLV_PEER_SET_PARAM_CMDID,
  31542. ++ .peer_assoc_cmdid = WMI_TLV_PEER_ASSOC_CMDID,
  31543. ++ .peer_add_wds_entry_cmdid = WMI_TLV_PEER_ADD_WDS_ENTRY_CMDID,
  31544. ++ .peer_remove_wds_entry_cmdid = WMI_TLV_PEER_REMOVE_WDS_ENTRY_CMDID,
  31545. ++ .peer_mcast_group_cmdid = WMI_TLV_PEER_MCAST_GROUP_CMDID,
  31546. ++ .bcn_tx_cmdid = WMI_TLV_BCN_TX_CMDID,
  31547. ++ .pdev_send_bcn_cmdid = WMI_TLV_PDEV_SEND_BCN_CMDID,
  31548. ++ .bcn_tmpl_cmdid = WMI_TLV_BCN_TMPL_CMDID,
  31549. ++ .bcn_filter_rx_cmdid = WMI_TLV_BCN_FILTER_RX_CMDID,
  31550. ++ .prb_req_filter_rx_cmdid = WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
  31551. ++ .mgmt_tx_cmdid = WMI_TLV_MGMT_TX_CMDID,
  31552. ++ .prb_tmpl_cmdid = WMI_TLV_PRB_TMPL_CMDID,
  31553. ++ .addba_clear_resp_cmdid = WMI_TLV_ADDBA_CLEAR_RESP_CMDID,
  31554. ++ .addba_send_cmdid = WMI_TLV_ADDBA_SEND_CMDID,
  31555. ++ .addba_status_cmdid = WMI_TLV_ADDBA_STATUS_CMDID,
  31556. ++ .delba_send_cmdid = WMI_TLV_DELBA_SEND_CMDID,
  31557. ++ .addba_set_resp_cmdid = WMI_TLV_ADDBA_SET_RESP_CMDID,
  31558. ++ .send_singleamsdu_cmdid = WMI_TLV_SEND_SINGLEAMSDU_CMDID,
  31559. ++ .sta_powersave_mode_cmdid = WMI_TLV_STA_POWERSAVE_MODE_CMDID,
  31560. ++ .sta_powersave_param_cmdid = WMI_TLV_STA_POWERSAVE_PARAM_CMDID,
  31561. ++ .sta_mimo_ps_mode_cmdid = WMI_TLV_STA_MIMO_PS_MODE_CMDID,
  31562. ++ .pdev_dfs_enable_cmdid = WMI_TLV_PDEV_DFS_ENABLE_CMDID,
  31563. ++ .pdev_dfs_disable_cmdid = WMI_TLV_PDEV_DFS_DISABLE_CMDID,
  31564. ++ .roam_scan_mode = WMI_TLV_ROAM_SCAN_MODE,
  31565. ++ .roam_scan_rssi_threshold = WMI_TLV_ROAM_SCAN_RSSI_THRESHOLD,
  31566. ++ .roam_scan_period = WMI_TLV_ROAM_SCAN_PERIOD,
  31567. ++ .roam_scan_rssi_change_threshold =
  31568. ++ WMI_TLV_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
  31569. ++ .roam_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
  31570. ++ .ofl_scan_add_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
  31571. ++ .ofl_scan_remove_ap_profile = WMI_TLV_OFL_SCAN_REMOVE_AP_PROFILE,
  31572. ++ .ofl_scan_period = WMI_TLV_OFL_SCAN_PERIOD,
  31573. ++ .p2p_dev_set_device_info = WMI_TLV_P2P_DEV_SET_DEVICE_INFO,
  31574. ++ .p2p_dev_set_discoverability = WMI_TLV_P2P_DEV_SET_DISCOVERABILITY,
  31575. ++ .p2p_go_set_beacon_ie = WMI_TLV_P2P_GO_SET_BEACON_IE,
  31576. ++ .p2p_go_set_probe_resp_ie = WMI_TLV_P2P_GO_SET_PROBE_RESP_IE,
  31577. ++ .p2p_set_vendor_ie_data_cmdid = WMI_TLV_P2P_SET_VENDOR_IE_DATA_CMDID,
  31578. ++ .ap_ps_peer_param_cmdid = WMI_TLV_AP_PS_PEER_PARAM_CMDID,
  31579. ++ .ap_ps_peer_uapsd_coex_cmdid = WMI_TLV_AP_PS_PEER_UAPSD_COEX_CMDID,
  31580. ++ .peer_rate_retry_sched_cmdid = WMI_TLV_PEER_RATE_RETRY_SCHED_CMDID,
  31581. ++ .wlan_profile_trigger_cmdid = WMI_TLV_WLAN_PROFILE_TRIGGER_CMDID,
  31582. ++ .wlan_profile_set_hist_intvl_cmdid =
  31583. ++ WMI_TLV_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
  31584. ++ .wlan_profile_get_profile_data_cmdid =
  31585. ++ WMI_TLV_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
  31586. ++ .wlan_profile_enable_profile_id_cmdid =
  31587. ++ WMI_TLV_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
  31588. ++ .wlan_profile_list_profile_id_cmdid =
  31589. ++ WMI_TLV_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
  31590. ++ .pdev_suspend_cmdid = WMI_TLV_PDEV_SUSPEND_CMDID,
  31591. ++ .pdev_resume_cmdid = WMI_TLV_PDEV_RESUME_CMDID,
  31592. ++ .add_bcn_filter_cmdid = WMI_TLV_ADD_BCN_FILTER_CMDID,
  31593. ++ .rmv_bcn_filter_cmdid = WMI_TLV_RMV_BCN_FILTER_CMDID,
  31594. ++ .wow_add_wake_pattern_cmdid = WMI_TLV_WOW_ADD_WAKE_PATTERN_CMDID,
  31595. ++ .wow_del_wake_pattern_cmdid = WMI_TLV_WOW_DEL_WAKE_PATTERN_CMDID,
  31596. ++ .wow_enable_disable_wake_event_cmdid =
  31597. ++ WMI_TLV_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
  31598. ++ .wow_enable_cmdid = WMI_TLV_WOW_ENABLE_CMDID,
  31599. ++ .wow_hostwakeup_from_sleep_cmdid =
  31600. ++ WMI_TLV_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
  31601. ++ .rtt_measreq_cmdid = WMI_TLV_RTT_MEASREQ_CMDID,
  31602. ++ .rtt_tsf_cmdid = WMI_TLV_RTT_TSF_CMDID,
  31603. ++ .vdev_spectral_scan_configure_cmdid = WMI_TLV_SPECTRAL_SCAN_CONF_CMDID,
  31604. ++ .vdev_spectral_scan_enable_cmdid = WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID,
  31605. ++ .request_stats_cmdid = WMI_TLV_REQUEST_STATS_CMDID,
  31606. ++ .set_arp_ns_offload_cmdid = WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID,
  31607. ++ .network_list_offload_config_cmdid =
  31608. ++ WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
  31609. ++ .gtk_offload_cmdid = WMI_TLV_GTK_OFFLOAD_CMDID,
  31610. ++ .csa_offload_enable_cmdid = WMI_TLV_CSA_OFFLOAD_ENABLE_CMDID,
  31611. ++ .csa_offload_chanswitch_cmdid = WMI_TLV_CSA_OFFLOAD_CHANSWITCH_CMDID,
  31612. ++ .chatter_set_mode_cmdid = WMI_TLV_CHATTER_SET_MODE_CMDID,
  31613. ++ .peer_tid_addba_cmdid = WMI_TLV_PEER_TID_ADDBA_CMDID,
  31614. ++ .peer_tid_delba_cmdid = WMI_TLV_PEER_TID_DELBA_CMDID,
  31615. ++ .sta_dtim_ps_method_cmdid = WMI_TLV_STA_DTIM_PS_METHOD_CMDID,
  31616. ++ .sta_uapsd_auto_trig_cmdid = WMI_TLV_STA_UAPSD_AUTO_TRIG_CMDID,
  31617. ++ .sta_keepalive_cmd = WMI_TLV_STA_KEEPALIVE_CMDID,
  31618. ++ .echo_cmdid = WMI_TLV_ECHO_CMDID,
  31619. ++ .pdev_utf_cmdid = WMI_TLV_PDEV_UTF_CMDID,
  31620. ++ .dbglog_cfg_cmdid = WMI_TLV_DBGLOG_CFG_CMDID,
  31621. ++ .pdev_qvit_cmdid = WMI_TLV_PDEV_QVIT_CMDID,
  31622. ++ .pdev_ftm_intg_cmdid = WMI_TLV_PDEV_FTM_INTG_CMDID,
  31623. ++ .vdev_set_keepalive_cmdid = WMI_TLV_VDEV_SET_KEEPALIVE_CMDID,
  31624. ++ .vdev_get_keepalive_cmdid = WMI_TLV_VDEV_GET_KEEPALIVE_CMDID,
  31625. ++ .force_fw_hang_cmdid = WMI_TLV_FORCE_FW_HANG_CMDID,
  31626. ++ .gpio_config_cmdid = WMI_TLV_GPIO_CONFIG_CMDID,
  31627. ++ .gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID,
  31628. ++ .pdev_get_temperature_cmdid = WMI_TLV_CMD_UNSUPPORTED,
  31629. ++ .vdev_set_wmm_params_cmdid = WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
  31630. ++};
  31631. ++
  31632. ++static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
  31633. ++ .tx_chain_mask = WMI_TLV_PDEV_PARAM_TX_CHAIN_MASK,
  31634. ++ .rx_chain_mask = WMI_TLV_PDEV_PARAM_RX_CHAIN_MASK,
  31635. ++ .txpower_limit2g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT2G,
  31636. ++ .txpower_limit5g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT5G,
  31637. ++ .txpower_scale = WMI_TLV_PDEV_PARAM_TXPOWER_SCALE,
  31638. ++ .beacon_gen_mode = WMI_TLV_PDEV_PARAM_BEACON_GEN_MODE,
  31639. ++ .beacon_tx_mode = WMI_TLV_PDEV_PARAM_BEACON_TX_MODE,
  31640. ++ .resmgr_offchan_mode = WMI_TLV_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
  31641. ++ .protection_mode = WMI_TLV_PDEV_PARAM_PROTECTION_MODE,
  31642. ++ .dynamic_bw = WMI_TLV_PDEV_PARAM_DYNAMIC_BW,
  31643. ++ .non_agg_sw_retry_th = WMI_TLV_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
  31644. ++ .agg_sw_retry_th = WMI_TLV_PDEV_PARAM_AGG_SW_RETRY_TH,
  31645. ++ .sta_kickout_th = WMI_TLV_PDEV_PARAM_STA_KICKOUT_TH,
  31646. ++ .ac_aggrsize_scaling = WMI_TLV_PDEV_PARAM_AC_AGGRSIZE_SCALING,
  31647. ++ .ltr_enable = WMI_TLV_PDEV_PARAM_LTR_ENABLE,
  31648. ++ .ltr_ac_latency_be = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BE,
  31649. ++ .ltr_ac_latency_bk = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BK,
  31650. ++ .ltr_ac_latency_vi = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VI,
  31651. ++ .ltr_ac_latency_vo = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VO,
  31652. ++ .ltr_ac_latency_timeout = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
  31653. ++ .ltr_sleep_override = WMI_TLV_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
  31654. ++ .ltr_rx_override = WMI_TLV_PDEV_PARAM_LTR_RX_OVERRIDE,
  31655. ++ .ltr_tx_activity_timeout = WMI_TLV_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
  31656. ++ .l1ss_enable = WMI_TLV_PDEV_PARAM_L1SS_ENABLE,
  31657. ++ .dsleep_enable = WMI_TLV_PDEV_PARAM_DSLEEP_ENABLE,
  31658. ++ .pcielp_txbuf_flush = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
  31659. ++ .pcielp_txbuf_watermark = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
  31660. ++ .pcielp_txbuf_tmo_en = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
  31661. ++ .pcielp_txbuf_tmo_value = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
  31662. ++ .pdev_stats_update_period = WMI_TLV_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
  31663. ++ .vdev_stats_update_period = WMI_TLV_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
  31664. ++ .peer_stats_update_period = WMI_TLV_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
  31665. ++ .bcnflt_stats_update_period =
  31666. ++ WMI_TLV_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
  31667. ++ .pmf_qos = WMI_TLV_PDEV_PARAM_PMF_QOS,
  31668. ++ .arp_ac_override = WMI_TLV_PDEV_PARAM_ARP_AC_OVERRIDE,
  31669. ++ .dcs = WMI_TLV_PDEV_PARAM_DCS,
  31670. ++ .ani_enable = WMI_TLV_PDEV_PARAM_ANI_ENABLE,
  31671. ++ .ani_poll_period = WMI_TLV_PDEV_PARAM_ANI_POLL_PERIOD,
  31672. ++ .ani_listen_period = WMI_TLV_PDEV_PARAM_ANI_LISTEN_PERIOD,
  31673. ++ .ani_ofdm_level = WMI_TLV_PDEV_PARAM_ANI_OFDM_LEVEL,
  31674. ++ .ani_cck_level = WMI_TLV_PDEV_PARAM_ANI_CCK_LEVEL,
  31675. ++ .dyntxchain = WMI_TLV_PDEV_PARAM_DYNTXCHAIN,
  31676. ++ .proxy_sta = WMI_TLV_PDEV_PARAM_PROXY_STA,
  31677. ++ .idle_ps_config = WMI_TLV_PDEV_PARAM_IDLE_PS_CONFIG,
  31678. ++ .power_gating_sleep = WMI_TLV_PDEV_PARAM_POWER_GATING_SLEEP,
  31679. ++ .fast_channel_reset = WMI_TLV_PDEV_PARAM_UNSUPPORTED,
  31680. ++ .burst_dur = WMI_TLV_PDEV_PARAM_BURST_DUR,
  31681. ++ .burst_enable = WMI_TLV_PDEV_PARAM_BURST_ENABLE,
  31682. ++ .cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
  31683. ++};
  31684. ++
  31685. ++static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
  31686. ++ .rts_threshold = WMI_TLV_VDEV_PARAM_RTS_THRESHOLD,
  31687. ++ .fragmentation_threshold = WMI_TLV_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
  31688. ++ .beacon_interval = WMI_TLV_VDEV_PARAM_BEACON_INTERVAL,
  31689. ++ .listen_interval = WMI_TLV_VDEV_PARAM_LISTEN_INTERVAL,
  31690. ++ .multicast_rate = WMI_TLV_VDEV_PARAM_MULTICAST_RATE,
  31691. ++ .mgmt_tx_rate = WMI_TLV_VDEV_PARAM_MGMT_TX_RATE,
  31692. ++ .slot_time = WMI_TLV_VDEV_PARAM_SLOT_TIME,
  31693. ++ .preamble = WMI_TLV_VDEV_PARAM_PREAMBLE,
  31694. ++ .swba_time = WMI_TLV_VDEV_PARAM_SWBA_TIME,
  31695. ++ .wmi_vdev_stats_update_period = WMI_TLV_VDEV_STATS_UPDATE_PERIOD,
  31696. ++ .wmi_vdev_pwrsave_ageout_time = WMI_TLV_VDEV_PWRSAVE_AGEOUT_TIME,
  31697. ++ .wmi_vdev_host_swba_interval = WMI_TLV_VDEV_HOST_SWBA_INTERVAL,
  31698. ++ .dtim_period = WMI_TLV_VDEV_PARAM_DTIM_PERIOD,
  31699. ++ .wmi_vdev_oc_scheduler_air_time_limit =
  31700. ++ WMI_TLV_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
  31701. ++ .wds = WMI_TLV_VDEV_PARAM_WDS,
  31702. ++ .atim_window = WMI_TLV_VDEV_PARAM_ATIM_WINDOW,
  31703. ++ .bmiss_count_max = WMI_TLV_VDEV_PARAM_BMISS_COUNT_MAX,
  31704. ++ .bmiss_first_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FIRST_BCNT,
  31705. ++ .bmiss_final_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FINAL_BCNT,
  31706. ++ .feature_wmm = WMI_TLV_VDEV_PARAM_FEATURE_WMM,
  31707. ++ .chwidth = WMI_TLV_VDEV_PARAM_CHWIDTH,
  31708. ++ .chextoffset = WMI_TLV_VDEV_PARAM_CHEXTOFFSET,
  31709. ++ .disable_htprotection = WMI_TLV_VDEV_PARAM_DISABLE_HTPROTECTION,
  31710. ++ .sta_quickkickout = WMI_TLV_VDEV_PARAM_STA_QUICKKICKOUT,
  31711. ++ .mgmt_rate = WMI_TLV_VDEV_PARAM_MGMT_RATE,
  31712. ++ .protection_mode = WMI_TLV_VDEV_PARAM_PROTECTION_MODE,
  31713. ++ .fixed_rate = WMI_TLV_VDEV_PARAM_FIXED_RATE,
  31714. ++ .sgi = WMI_TLV_VDEV_PARAM_SGI,
  31715. ++ .ldpc = WMI_TLV_VDEV_PARAM_LDPC,
  31716. ++ .tx_stbc = WMI_TLV_VDEV_PARAM_TX_STBC,
  31717. ++ .rx_stbc = WMI_TLV_VDEV_PARAM_RX_STBC,
  31718. ++ .intra_bss_fwd = WMI_TLV_VDEV_PARAM_INTRA_BSS_FWD,
  31719. ++ .def_keyid = WMI_TLV_VDEV_PARAM_DEF_KEYID,
  31720. ++ .nss = WMI_TLV_VDEV_PARAM_NSS,
  31721. ++ .bcast_data_rate = WMI_TLV_VDEV_PARAM_BCAST_DATA_RATE,
  31722. ++ .mcast_data_rate = WMI_TLV_VDEV_PARAM_MCAST_DATA_RATE,
  31723. ++ .mcast_indicate = WMI_TLV_VDEV_PARAM_MCAST_INDICATE,
  31724. ++ .dhcp_indicate = WMI_TLV_VDEV_PARAM_DHCP_INDICATE,
  31725. ++ .unknown_dest_indicate = WMI_TLV_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
  31726. ++ .ap_keepalive_min_idle_inactive_time_secs =
  31727. ++ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
  31728. ++ .ap_keepalive_max_idle_inactive_time_secs =
  31729. ++ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
  31730. ++ .ap_keepalive_max_unresponsive_time_secs =
  31731. ++ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
  31732. ++ .ap_enable_nawds = WMI_TLV_VDEV_PARAM_AP_ENABLE_NAWDS,
  31733. ++ .mcast2ucast_set = WMI_TLV_VDEV_PARAM_UNSUPPORTED,
  31734. ++ .enable_rtscts = WMI_TLV_VDEV_PARAM_ENABLE_RTSCTS,
  31735. ++ .txbf = WMI_TLV_VDEV_PARAM_TXBF,
  31736. ++ .packet_powersave = WMI_TLV_VDEV_PARAM_PACKET_POWERSAVE,
  31737. ++ .drop_unencry = WMI_TLV_VDEV_PARAM_DROP_UNENCRY,
  31738. ++ .tx_encap_type = WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE,
  31739. ++ .ap_detect_out_of_sync_sleeping_sta_time_secs =
  31740. ++ WMI_TLV_VDEV_PARAM_UNSUPPORTED,
  31741. ++};
  31742. ++
  31743. ++static const struct wmi_ops wmi_tlv_ops = {
  31744. ++ .rx = ath10k_wmi_tlv_op_rx,
  31745. ++ .map_svc = wmi_tlv_svc_map,
  31746. ++
  31747. ++ .pull_scan = ath10k_wmi_tlv_op_pull_scan_ev,
  31748. ++ .pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev,
  31749. ++ .pull_ch_info = ath10k_wmi_tlv_op_pull_ch_info_ev,
  31750. ++ .pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev,
  31751. ++ .pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev,
  31752. ++ .pull_swba = ath10k_wmi_tlv_op_pull_swba_ev,
  31753. ++ .pull_phyerr = ath10k_wmi_tlv_op_pull_phyerr_ev,
  31754. ++ .pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev,
  31755. ++ .pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev,
  31756. ++ .pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
  31757. ++
  31758. ++ .gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
  31759. ++ .gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
  31760. ++ .gen_pdev_set_rd = ath10k_wmi_tlv_op_gen_pdev_set_rd,
  31761. ++ .gen_pdev_set_param = ath10k_wmi_tlv_op_gen_pdev_set_param,
  31762. ++ .gen_init = ath10k_wmi_tlv_op_gen_init,
  31763. ++ .gen_start_scan = ath10k_wmi_tlv_op_gen_start_scan,
  31764. ++ .gen_stop_scan = ath10k_wmi_tlv_op_gen_stop_scan,
  31765. ++ .gen_vdev_create = ath10k_wmi_tlv_op_gen_vdev_create,
  31766. ++ .gen_vdev_delete = ath10k_wmi_tlv_op_gen_vdev_delete,
  31767. ++ .gen_vdev_start = ath10k_wmi_tlv_op_gen_vdev_start,
  31768. ++ .gen_vdev_stop = ath10k_wmi_tlv_op_gen_vdev_stop,
  31769. ++ .gen_vdev_up = ath10k_wmi_tlv_op_gen_vdev_up,
  31770. ++ .gen_vdev_down = ath10k_wmi_tlv_op_gen_vdev_down,
  31771. ++ .gen_vdev_set_param = ath10k_wmi_tlv_op_gen_vdev_set_param,
  31772. ++ .gen_vdev_install_key = ath10k_wmi_tlv_op_gen_vdev_install_key,
  31773. ++ .gen_vdev_wmm_conf = ath10k_wmi_tlv_op_gen_vdev_wmm_conf,
  31774. ++ .gen_peer_create = ath10k_wmi_tlv_op_gen_peer_create,
  31775. ++ .gen_peer_delete = ath10k_wmi_tlv_op_gen_peer_delete,
  31776. ++ .gen_peer_flush = ath10k_wmi_tlv_op_gen_peer_flush,
  31777. ++ .gen_peer_set_param = ath10k_wmi_tlv_op_gen_peer_set_param,
  31778. ++ .gen_peer_assoc = ath10k_wmi_tlv_op_gen_peer_assoc,
  31779. ++ .gen_set_psmode = ath10k_wmi_tlv_op_gen_set_psmode,
  31780. ++ .gen_set_sta_ps = ath10k_wmi_tlv_op_gen_set_sta_ps,
  31781. ++ .gen_set_ap_ps = ath10k_wmi_tlv_op_gen_set_ap_ps,
  31782. ++ .gen_scan_chan_list = ath10k_wmi_tlv_op_gen_scan_chan_list,
  31783. ++ .gen_beacon_dma = ath10k_wmi_tlv_op_gen_beacon_dma,
  31784. ++ .gen_pdev_set_wmm = ath10k_wmi_tlv_op_gen_pdev_set_wmm,
  31785. ++ .gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats,
  31786. ++ .gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang,
  31787. ++ /* .gen_mgmt_tx = not implemented; HTT is used */
  31788. ++ .gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg,
  31789. ++ .gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable,
  31790. ++ .gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable,
  31791. ++ /* .gen_pdev_set_quiet_mode not implemented */
  31792. ++ /* .gen_pdev_get_temperature not implemented */
  31793. ++ /* .gen_addba_clear_resp not implemented */
  31794. ++ /* .gen_addba_send not implemented */
  31795. ++ /* .gen_addba_set_resp not implemented */
  31796. ++ /* .gen_delba_send not implemented */
  31797. ++ .gen_bcn_tmpl = ath10k_wmi_tlv_op_gen_bcn_tmpl,
  31798. ++ .gen_prb_tmpl = ath10k_wmi_tlv_op_gen_prb_tmpl,
  31799. ++ .gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie,
  31800. ++ .gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd,
  31801. ++ .gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive,
  31802. ++};
  31803. ++
  31804. ++/************/
  31805. ++/* TLV init */
  31806. ++/************/
  31807. ++
  31808. ++void ath10k_wmi_tlv_attach(struct ath10k *ar)
  31809. ++{
  31810. ++ ar->wmi.cmd = &wmi_tlv_cmd_map;
  31811. ++ ar->wmi.vdev_param = &wmi_tlv_vdev_param_map;
  31812. ++ ar->wmi.pdev_param = &wmi_tlv_pdev_param_map;
  31813. ++ ar->wmi.ops = &wmi_tlv_ops;
  31814. ++}
  31815. +--- /dev/null
  31816. ++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
  31817. +@@ -0,0 +1,1459 @@
  31818. ++/*
  31819. ++ * Copyright (c) 2005-2011 Atheros Communications Inc.
  31820. ++ * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
  31821. ++ *
  31822. ++ * Permission to use, copy, modify, and/or distribute this software for any
  31823. ++ * purpose with or without fee is hereby granted, provided that the above
  31824. ++ * copyright notice and this permission notice appear in all copies.
  31825. ++ *
  31826. ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  31827. ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  31828. ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  31829. ++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  31830. ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  31831. ++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  31832. ++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  31833. ++ */
  31834. ++#ifndef _WMI_TLV_H
  31835. ++#define _WMI_TLV_H
  31836. ++
  31837. ++#define WMI_TLV_CMD(grp_id) (((grp_id) << 12) | 0x1)
  31838. ++#define WMI_TLV_EV(grp_id) (((grp_id) << 12) | 0x1)
  31839. ++#define WMI_TLV_CMD_UNSUPPORTED 0
  31840. ++#define WMI_TLV_PDEV_PARAM_UNSUPPORTED 0
  31841. ++#define WMI_TLV_VDEV_PARAM_UNSUPPORTED 0
  31842. ++
  31843. ++enum wmi_tlv_grp_id {
  31844. ++ WMI_TLV_GRP_START = 0x3,
  31845. ++ WMI_TLV_GRP_SCAN = WMI_TLV_GRP_START,
  31846. ++ WMI_TLV_GRP_PDEV,
  31847. ++ WMI_TLV_GRP_VDEV,
  31848. ++ WMI_TLV_GRP_PEER,
  31849. ++ WMI_TLV_GRP_MGMT,
  31850. ++ WMI_TLV_GRP_BA_NEG,
  31851. ++ WMI_TLV_GRP_STA_PS,
  31852. ++ WMI_TLV_GRP_DFS,
  31853. ++ WMI_TLV_GRP_ROAM,
  31854. ++ WMI_TLV_GRP_OFL_SCAN,
  31855. ++ WMI_TLV_GRP_P2P,
  31856. ++ WMI_TLV_GRP_AP_PS,
  31857. ++ WMI_TLV_GRP_RATECTL,
  31858. ++ WMI_TLV_GRP_PROFILE,
  31859. ++ WMI_TLV_GRP_SUSPEND,
  31860. ++ WMI_TLV_GRP_BCN_FILTER,
  31861. ++ WMI_TLV_GRP_WOW,
  31862. ++ WMI_TLV_GRP_RTT,
  31863. ++ WMI_TLV_GRP_SPECTRAL,
  31864. ++ WMI_TLV_GRP_STATS,
  31865. ++ WMI_TLV_GRP_ARP_NS_OFL,
  31866. ++ WMI_TLV_GRP_NLO_OFL,
  31867. ++ WMI_TLV_GRP_GTK_OFL,
  31868. ++ WMI_TLV_GRP_CSA_OFL,
  31869. ++ WMI_TLV_GRP_CHATTER,
  31870. ++ WMI_TLV_GRP_TID_ADDBA,
  31871. ++ WMI_TLV_GRP_MISC,
  31872. ++ WMI_TLV_GRP_GPIO,
  31873. ++ WMI_TLV_GRP_FWTEST,
  31874. ++ WMI_TLV_GRP_TDLS,
  31875. ++ WMI_TLV_GRP_RESMGR,
  31876. ++ WMI_TLV_GRP_STA_SMPS,
  31877. ++ WMI_TLV_GRP_WLAN_HB,
  31878. ++ WMI_TLV_GRP_RMC,
  31879. ++ WMI_TLV_GRP_MHF_OFL,
  31880. ++ WMI_TLV_GRP_LOCATION_SCAN,
  31881. ++ WMI_TLV_GRP_OEM,
  31882. ++ WMI_TLV_GRP_NAN,
  31883. ++ WMI_TLV_GRP_COEX,
  31884. ++ WMI_TLV_GRP_OBSS_OFL,
  31885. ++ WMI_TLV_GRP_LPI,
  31886. ++ WMI_TLV_GRP_EXTSCAN,
  31887. ++ WMI_TLV_GRP_DHCP_OFL,
  31888. ++ WMI_TLV_GRP_IPA,
  31889. ++ WMI_TLV_GRP_MDNS_OFL,
  31890. ++ WMI_TLV_GRP_SAP_OFL,
  31891. ++};
  31892. ++
  31893. ++enum wmi_tlv_cmd_id {
  31894. ++ WMI_TLV_INIT_CMDID = 0x1,
  31895. ++ WMI_TLV_START_SCAN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SCAN),
  31896. ++ WMI_TLV_STOP_SCAN_CMDID,
  31897. ++ WMI_TLV_SCAN_CHAN_LIST_CMDID,
  31898. ++ WMI_TLV_SCAN_SCH_PRIO_TBL_CMDID,
  31899. ++ WMI_TLV_SCAN_UPDATE_REQUEST_CMDID,
  31900. ++ WMI_TLV_SCAN_PROB_REQ_OUI_CMDID,
  31901. ++ WMI_TLV_PDEV_SET_REGDOMAIN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_PDEV),
  31902. ++ WMI_TLV_PDEV_SET_CHANNEL_CMDID,
  31903. ++ WMI_TLV_PDEV_SET_PARAM_CMDID,
  31904. ++ WMI_TLV_PDEV_PKTLOG_ENABLE_CMDID,
  31905. ++ WMI_TLV_PDEV_PKTLOG_DISABLE_CMDID,
  31906. ++ WMI_TLV_PDEV_SET_WMM_PARAMS_CMDID,
  31907. ++ WMI_TLV_PDEV_SET_HT_CAP_IE_CMDID,
  31908. ++ WMI_TLV_PDEV_SET_VHT_CAP_IE_CMDID,
  31909. ++ WMI_TLV_PDEV_SET_DSCP_TID_MAP_CMDID,
  31910. ++ WMI_TLV_PDEV_SET_QUIET_MODE_CMDID,
  31911. ++ WMI_TLV_PDEV_GREEN_AP_PS_ENABLE_CMDID,
  31912. ++ WMI_TLV_PDEV_GET_TPC_CONFIG_CMDID,
  31913. ++ WMI_TLV_PDEV_SET_BASE_MACADDR_CMDID,
  31914. ++ WMI_TLV_PDEV_DUMP_CMDID,
  31915. ++ WMI_TLV_PDEV_SET_LED_CONFIG_CMDID,
  31916. ++ WMI_TLV_PDEV_GET_TEMPERATURE_CMDID,
  31917. ++ WMI_TLV_PDEV_SET_LED_FLASHING_CMDID,
  31918. ++ WMI_TLV_VDEV_CREATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_VDEV),
  31919. ++ WMI_TLV_VDEV_DELETE_CMDID,
  31920. ++ WMI_TLV_VDEV_START_REQUEST_CMDID,
  31921. ++ WMI_TLV_VDEV_RESTART_REQUEST_CMDID,
  31922. ++ WMI_TLV_VDEV_UP_CMDID,
  31923. ++ WMI_TLV_VDEV_STOP_CMDID,
  31924. ++ WMI_TLV_VDEV_DOWN_CMDID,
  31925. ++ WMI_TLV_VDEV_SET_PARAM_CMDID,
  31926. ++ WMI_TLV_VDEV_INSTALL_KEY_CMDID,
  31927. ++ WMI_TLV_VDEV_WNM_SLEEPMODE_CMDID,
  31928. ++ WMI_TLV_VDEV_WMM_ADDTS_CMDID,
  31929. ++ WMI_TLV_VDEV_WMM_DELTS_CMDID,
  31930. ++ WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
  31931. ++ WMI_TLV_VDEV_SET_GTX_PARAMS_CMDID,
  31932. ++ WMI_TLV_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMDID,
  31933. ++ WMI_TLV_VDEV_PLMREQ_START_CMDID,
  31934. ++ WMI_TLV_VDEV_PLMREQ_STOP_CMDID,
  31935. ++ WMI_TLV_PEER_CREATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_PEER),
  31936. ++ WMI_TLV_PEER_DELETE_CMDID,
  31937. ++ WMI_TLV_PEER_FLUSH_TIDS_CMDID,
  31938. ++ WMI_TLV_PEER_SET_PARAM_CMDID,
  31939. ++ WMI_TLV_PEER_ASSOC_CMDID,
  31940. ++ WMI_TLV_PEER_ADD_WDS_ENTRY_CMDID,
  31941. ++ WMI_TLV_PEER_REMOVE_WDS_ENTRY_CMDID,
  31942. ++ WMI_TLV_PEER_MCAST_GROUP_CMDID,
  31943. ++ WMI_TLV_PEER_INFO_REQ_CMDID,
  31944. ++ WMI_TLV_PEER_GET_ESTIMATED_LINKSPEED_CMDID,
  31945. ++ WMI_TLV_BCN_TX_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MGMT),
  31946. ++ WMI_TLV_PDEV_SEND_BCN_CMDID,
  31947. ++ WMI_TLV_BCN_TMPL_CMDID,
  31948. ++ WMI_TLV_BCN_FILTER_RX_CMDID,
  31949. ++ WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
  31950. ++ WMI_TLV_MGMT_TX_CMDID,
  31951. ++ WMI_TLV_PRB_TMPL_CMDID,
  31952. ++ WMI_TLV_ADDBA_CLEAR_RESP_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_BA_NEG),
  31953. ++ WMI_TLV_ADDBA_SEND_CMDID,
  31954. ++ WMI_TLV_ADDBA_STATUS_CMDID,
  31955. ++ WMI_TLV_DELBA_SEND_CMDID,
  31956. ++ WMI_TLV_ADDBA_SET_RESP_CMDID,
  31957. ++ WMI_TLV_SEND_SINGLEAMSDU_CMDID,
  31958. ++ WMI_TLV_STA_POWERSAVE_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_STA_PS),
  31959. ++ WMI_TLV_STA_POWERSAVE_PARAM_CMDID,
  31960. ++ WMI_TLV_STA_MIMO_PS_MODE_CMDID,
  31961. ++ WMI_TLV_PDEV_DFS_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_DFS),
  31962. ++ WMI_TLV_PDEV_DFS_DISABLE_CMDID,
  31963. ++ WMI_TLV_DFS_PHYERR_FILTER_ENA_CMDID,
  31964. ++ WMI_TLV_DFS_PHYERR_FILTER_DIS_CMDID,
  31965. ++ WMI_TLV_ROAM_SCAN_MODE = WMI_TLV_CMD(WMI_TLV_GRP_ROAM),
  31966. ++ WMI_TLV_ROAM_SCAN_RSSI_THRESHOLD,
  31967. ++ WMI_TLV_ROAM_SCAN_PERIOD,
  31968. ++ WMI_TLV_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
  31969. ++ WMI_TLV_ROAM_AP_PROFILE,
  31970. ++ WMI_TLV_ROAM_CHAN_LIST,
  31971. ++ WMI_TLV_ROAM_SCAN_CMD,
  31972. ++ WMI_TLV_ROAM_SYNCH_COMPLETE,
  31973. ++ WMI_TLV_ROAM_SET_RIC_REQUEST_CMDID,
  31974. ++ WMI_TLV_ROAM_INVOKE_CMDID,
  31975. ++ WMI_TLV_OFL_SCAN_ADD_AP_PROFILE = WMI_TLV_CMD(WMI_TLV_GRP_OFL_SCAN),
  31976. ++ WMI_TLV_OFL_SCAN_REMOVE_AP_PROFILE,
  31977. ++ WMI_TLV_OFL_SCAN_PERIOD,
  31978. ++ WMI_TLV_P2P_DEV_SET_DEVICE_INFO = WMI_TLV_CMD(WMI_TLV_GRP_P2P),
  31979. ++ WMI_TLV_P2P_DEV_SET_DISCOVERABILITY,
  31980. ++ WMI_TLV_P2P_GO_SET_BEACON_IE,
  31981. ++ WMI_TLV_P2P_GO_SET_PROBE_RESP_IE,
  31982. ++ WMI_TLV_P2P_SET_VENDOR_IE_DATA_CMDID,
  31983. ++ WMI_TLV_P2P_DISC_OFFLOAD_CONFIG_CMDID,
  31984. ++ WMI_TLV_P2P_DISC_OFFLOAD_APPIE_CMDID,
  31985. ++ WMI_TLV_P2P_DISC_OFFLOAD_PATTERN_CMDID,
  31986. ++ WMI_TLV_P2P_SET_OPPPS_PARAM_CMDID,
  31987. ++ WMI_TLV_AP_PS_PEER_PARAM_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_AP_PS),
  31988. ++ WMI_TLV_AP_PS_PEER_UAPSD_COEX_CMDID,
  31989. ++ WMI_TLV_PEER_RATE_RETRY_SCHED_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RATECTL),
  31990. ++ WMI_TLV_WLAN_PROFILE_TRIGGER_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_PROFILE),
  31991. ++ WMI_TLV_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
  31992. ++ WMI_TLV_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
  31993. ++ WMI_TLV_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
  31994. ++ WMI_TLV_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
  31995. ++ WMI_TLV_PDEV_SUSPEND_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SUSPEND),
  31996. ++ WMI_TLV_PDEV_RESUME_CMDID,
  31997. ++ WMI_TLV_ADD_BCN_FILTER_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_BCN_FILTER),
  31998. ++ WMI_TLV_RMV_BCN_FILTER_CMDID,
  31999. ++ WMI_TLV_WOW_ADD_WAKE_PATTERN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_WOW),
  32000. ++ WMI_TLV_WOW_DEL_WAKE_PATTERN_CMDID,
  32001. ++ WMI_TLV_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
  32002. ++ WMI_TLV_WOW_ENABLE_CMDID,
  32003. ++ WMI_TLV_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
  32004. ++ WMI_TLV_WOW_ACER_IOAC_ADD_KEEPALIVE_CMDID,
  32005. ++ WMI_TLV_WOW_ACER_IOAC_DEL_KEEPALIVE_CMDID,
  32006. ++ WMI_TLV_WOW_ACER_IOAC_ADD_WAKE_PATTERN_CMDID,
  32007. ++ WMI_TLV_WOW_ACER_IOAC_DEL_WAKE_PATTERN_CMDID,
  32008. ++ WMI_TLV_D0_WOW_ENABLE_DISABLE_CMDID,
  32009. ++ WMI_TLV_EXTWOW_ENABLE_CMDID,
  32010. ++ WMI_TLV_EXTWOW_SET_APP_TYPE1_PARAMS_CMDID,
  32011. ++ WMI_TLV_EXTWOW_SET_APP_TYPE2_PARAMS_CMDID,
  32012. ++ WMI_TLV_RTT_MEASREQ_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RTT),
  32013. ++ WMI_TLV_RTT_TSF_CMDID,
  32014. ++ WMI_TLV_SPECTRAL_SCAN_CONF_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SPECTRAL),
  32015. ++ WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID,
  32016. ++ WMI_TLV_REQUEST_STATS_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_STATS),
  32017. ++ WMI_TLV_MCC_SCHED_TRAFFIC_STATS_CMDID,
  32018. ++ WMI_TLV_REQUEST_STATS_EXT_CMDID,
  32019. ++ WMI_TLV_REQUEST_LINK_STATS_CMDID,
  32020. ++ WMI_TLV_START_LINK_STATS_CMDID,
  32021. ++ WMI_TLV_CLEAR_LINK_STATS_CMDID,
  32022. ++ WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_ARP_NS_OFL),
  32023. ++ WMI_TLV_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID,
  32024. ++ WMI_TLV_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID,
  32025. ++ WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID =
  32026. ++ WMI_TLV_CMD(WMI_TLV_GRP_NLO_OFL),
  32027. ++ WMI_TLV_APFIND_CMDID,
  32028. ++ WMI_TLV_GTK_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_GTK_OFL),
  32029. ++ WMI_TLV_CSA_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_CSA_OFL),
  32030. ++ WMI_TLV_CSA_OFFLOAD_CHANSWITCH_CMDID,
  32031. ++ WMI_TLV_CHATTER_SET_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_CHATTER),
  32032. ++ WMI_TLV_CHATTER_ADD_COALESCING_FILTER_CMDID,
  32033. ++ WMI_TLV_CHATTER_DELETE_COALESCING_FILTER_CMDID,
  32034. ++ WMI_TLV_CHATTER_COALESCING_QUERY_CMDID,
  32035. ++ WMI_TLV_PEER_TID_ADDBA_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_TID_ADDBA),
  32036. ++ WMI_TLV_PEER_TID_DELBA_CMDID,
  32037. ++ WMI_TLV_STA_DTIM_PS_METHOD_CMDID,
  32038. ++ WMI_TLV_STA_UAPSD_AUTO_TRIG_CMDID,
  32039. ++ WMI_TLV_STA_KEEPALIVE_CMDID,
  32040. ++ WMI_TLV_BA_REQ_SSN_CMDID,
  32041. ++ WMI_TLV_ECHO_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MISC),
  32042. ++ WMI_TLV_PDEV_UTF_CMDID,
  32043. ++ WMI_TLV_DBGLOG_CFG_CMDID,
  32044. ++ WMI_TLV_PDEV_QVIT_CMDID,
  32045. ++ WMI_TLV_PDEV_FTM_INTG_CMDID,
  32046. ++ WMI_TLV_VDEV_SET_KEEPALIVE_CMDID,
  32047. ++ WMI_TLV_VDEV_GET_KEEPALIVE_CMDID,
  32048. ++ WMI_TLV_FORCE_FW_HANG_CMDID,
  32049. ++ WMI_TLV_SET_MCASTBCAST_FILTER_CMDID,
  32050. ++ WMI_TLV_THERMAL_MGMT_CMDID,
  32051. ++ WMI_TLV_HOST_AUTO_SHUTDOWN_CFG_CMDID,
  32052. ++ WMI_TLV_TPC_CHAINMASK_CONFIG_CMDID,
  32053. ++ WMI_TLV_SET_ANTENNA_DIVERSITY_CMDID,
  32054. ++ WMI_TLV_GPIO_CONFIG_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_GPIO),
  32055. ++ WMI_TLV_GPIO_OUTPUT_CMDID,
  32056. ++ WMI_TLV_TXBF_CMDID,
  32057. ++ WMI_TLV_FWTEST_VDEV_MCC_SET_TBTT_MODE_CMDID =
  32058. ++ WMI_TLV_CMD(WMI_TLV_GRP_FWTEST),
  32059. ++ WMI_TLV_FWTEST_P2P_SET_NOA_PARAM_CMDID,
  32060. ++ WMI_TLV_UNIT_TEST_CMDID,
  32061. ++ WMI_TLV_TDLS_SET_STATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_TDLS),
  32062. ++ WMI_TLV_TDLS_PEER_UPDATE_CMDID,
  32063. ++ WMI_TLV_TDLS_SET_OFFCHAN_MODE_CMDID,
  32064. ++ WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RESMGR),
  32065. ++ WMI_TLV_RESMGR_SET_CHAN_TIME_QUOTA_CMDID,
  32066. ++ WMI_TLV_RESMGR_SET_CHAN_LATENCY_CMDID,
  32067. ++ WMI_TLV_STA_SMPS_FORCE_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_STA_SMPS),
  32068. ++ WMI_TLV_STA_SMPS_PARAM_CMDID,
  32069. ++ WMI_TLV_HB_SET_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_WLAN_HB),
  32070. ++ WMI_TLV_HB_SET_TCP_PARAMS_CMDID,
  32071. ++ WMI_TLV_HB_SET_TCP_PKT_FILTER_CMDID,
  32072. ++ WMI_TLV_HB_SET_UDP_PARAMS_CMDID,
  32073. ++ WMI_TLV_HB_SET_UDP_PKT_FILTER_CMDID,
  32074. ++ WMI_TLV_RMC_SET_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RMC),
  32075. ++ WMI_TLV_RMC_SET_ACTION_PERIOD_CMDID,
  32076. ++ WMI_TLV_RMC_CONFIG_CMDID,
  32077. ++ WMI_TLV_MHF_OFFLOAD_SET_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MHF_OFL),
  32078. ++ WMI_TLV_MHF_OFFLOAD_PLUMB_ROUTING_TBL_CMDID,
  32079. ++ WMI_TLV_BATCH_SCAN_ENABLE_CMDID =
  32080. ++ WMI_TLV_CMD(WMI_TLV_GRP_LOCATION_SCAN),
  32081. ++ WMI_TLV_BATCH_SCAN_DISABLE_CMDID,
  32082. ++ WMI_TLV_BATCH_SCAN_TRIGGER_RESULT_CMDID,
  32083. ++ WMI_TLV_OEM_REQ_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_OEM),
  32084. ++ WMI_TLV_NAN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_NAN),
  32085. ++ WMI_TLV_MODEM_POWER_STATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_COEX),
  32086. ++ WMI_TLV_CHAN_AVOID_UPDATE_CMDID,
  32087. ++ WMI_TLV_OBSS_SCAN_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_OBSS_OFL),
  32088. ++ WMI_TLV_OBSS_SCAN_DISABLE_CMDID,
  32089. ++ WMI_TLV_LPI_MGMT_SNOOPING_CONFIG_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_LPI),
  32090. ++ WMI_TLV_LPI_START_SCAN_CMDID,
  32091. ++ WMI_TLV_LPI_STOP_SCAN_CMDID,
  32092. ++ WMI_TLV_EXTSCAN_START_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_EXTSCAN),
  32093. ++ WMI_TLV_EXTSCAN_STOP_CMDID,
  32094. ++ WMI_TLV_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMDID,
  32095. ++ WMI_TLV_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMDID,
  32096. ++ WMI_TLV_EXTSCAN_GET_CACHED_RESULTS_CMDID,
  32097. ++ WMI_TLV_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMDID,
  32098. ++ WMI_TLV_EXTSCAN_SET_CAPABILITIES_CMDID,
  32099. ++ WMI_TLV_EXTSCAN_GET_CAPABILITIES_CMDID,
  32100. ++ WMI_TLV_SET_DHCP_SERVER_OFFLOAD_CMDID =
  32101. ++ WMI_TLV_CMD(WMI_TLV_GRP_DHCP_OFL),
  32102. ++ WMI_TLV_IPA_OFFLOAD_ENABLE_DISABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_IPA),
  32103. ++ WMI_TLV_MDNS_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MDNS_OFL),
  32104. ++ WMI_TLV_MDNS_SET_FQDN_CMDID,
  32105. ++ WMI_TLV_MDNS_SET_RESPONSE_CMDID,
  32106. ++ WMI_TLV_MDNS_GET_STATS_CMDID,
  32107. ++ WMI_TLV_SAP_OFL_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SAP_OFL),
  32108. ++};
  32109. ++
  32110. ++enum wmi_tlv_event_id {
  32111. ++ WMI_TLV_SERVICE_READY_EVENTID = 0x1,
  32112. ++ WMI_TLV_READY_EVENTID,
  32113. ++ WMI_TLV_SCAN_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_SCAN),
  32114. ++ WMI_TLV_PDEV_TPC_CONFIG_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_PDEV),
  32115. ++ WMI_TLV_CHAN_INFO_EVENTID,
  32116. ++ WMI_TLV_PHYERR_EVENTID,
  32117. ++ WMI_TLV_PDEV_DUMP_EVENTID,
  32118. ++ WMI_TLV_TX_PAUSE_EVENTID,
  32119. ++ WMI_TLV_DFS_RADAR_EVENTID,
  32120. ++ WMI_TLV_PDEV_L1SS_TRACK_EVENTID,
  32121. ++ WMI_TLV_PDEV_TEMPERATURE_EVENTID,
  32122. ++ WMI_TLV_VDEV_START_RESP_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_VDEV),
  32123. ++ WMI_TLV_VDEV_STOPPED_EVENTID,
  32124. ++ WMI_TLV_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
  32125. ++ WMI_TLV_VDEV_MCC_BCN_INTERVAL_CHANGE_REQ_EVENTID,
  32126. ++ WMI_TLV_PEER_STA_KICKOUT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_PEER),
  32127. ++ WMI_TLV_PEER_INFO_EVENTID,
  32128. ++ WMI_TLV_PEER_TX_FAIL_CNT_THR_EVENTID,
  32129. ++ WMI_TLV_PEER_ESTIMATED_LINKSPEED_EVENTID,
  32130. ++ WMI_TLV_PEER_STATE_EVENTID,
  32131. ++ WMI_TLV_MGMT_RX_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_MGMT),
  32132. ++ WMI_TLV_HOST_SWBA_EVENTID,
  32133. ++ WMI_TLV_TBTTOFFSET_UPDATE_EVENTID,
  32134. ++ WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID,
  32135. ++ WMI_TLV_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID,
  32136. ++ WMI_TLV_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_BA_NEG),
  32137. ++ WMI_TLV_TX_ADDBA_COMPLETE_EVENTID,
  32138. ++ WMI_TLV_BA_RSP_SSN_EVENTID,
  32139. ++ WMI_TLV_AGGR_STATE_TRIG_EVENTID,
  32140. ++ WMI_TLV_ROAM_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_ROAM),
  32141. ++ WMI_TLV_PROFILE_MATCH,
  32142. ++ WMI_TLV_ROAM_SYNCH_EVENTID,
  32143. ++ WMI_TLV_P2P_DISC_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_P2P),
  32144. ++ WMI_TLV_P2P_NOA_EVENTID,
  32145. ++ WMI_TLV_PDEV_RESUME_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_SUSPEND),
  32146. ++ WMI_TLV_WOW_WAKEUP_HOST_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_WOW),
  32147. ++ WMI_TLV_D0_WOW_DISABLE_ACK_EVENTID,
  32148. ++ WMI_TLV_RTT_MEASUREMENT_REPORT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_RTT),
  32149. ++ WMI_TLV_TSF_MEASUREMENT_REPORT_EVENTID,
  32150. ++ WMI_TLV_RTT_ERROR_REPORT_EVENTID,
  32151. ++ WMI_TLV_STATS_EXT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_STATS),
  32152. ++ WMI_TLV_IFACE_LINK_STATS_EVENTID,
  32153. ++ WMI_TLV_PEER_LINK_STATS_EVENTID,
  32154. ++ WMI_TLV_RADIO_LINK_STATS_EVENTID,
  32155. ++ WMI_TLV_NLO_MATCH_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_NLO_OFL),
  32156. ++ WMI_TLV_NLO_SCAN_COMPLETE_EVENTID,
  32157. ++ WMI_TLV_APFIND_EVENTID,
  32158. ++ WMI_TLV_GTK_OFFLOAD_STATUS_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_GTK_OFL),
  32159. ++ WMI_TLV_GTK_REKEY_FAIL_EVENTID,
  32160. ++ WMI_TLV_CSA_HANDLING_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_CSA_OFL),
  32161. ++ WMI_TLV_CHATTER_PC_QUERY_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_CHATTER),
  32162. ++ WMI_TLV_ECHO_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_MISC),
  32163. ++ WMI_TLV_PDEV_UTF_EVENTID,
  32164. ++ WMI_TLV_DEBUG_MESG_EVENTID,
  32165. ++ WMI_TLV_UPDATE_STATS_EVENTID,
  32166. ++ WMI_TLV_DEBUG_PRINT_EVENTID,
  32167. ++ WMI_TLV_DCS_INTERFERENCE_EVENTID,
  32168. ++ WMI_TLV_PDEV_QVIT_EVENTID,
  32169. ++ WMI_TLV_WLAN_PROFILE_DATA_EVENTID,
  32170. ++ WMI_TLV_PDEV_FTM_INTG_EVENTID,
  32171. ++ WMI_TLV_WLAN_FREQ_AVOID_EVENTID,
  32172. ++ WMI_TLV_VDEV_GET_KEEPALIVE_EVENTID,
  32173. ++ WMI_TLV_THERMAL_MGMT_EVENTID,
  32174. ++ WMI_TLV_DIAG_DATA_CONTAINER_EVENTID,
  32175. ++ WMI_TLV_HOST_AUTO_SHUTDOWN_EVENTID,
  32176. ++ WMI_TLV_UPDATE_WHAL_MIB_STATS_EVENTID,
  32177. ++ WMI_TLV_UPDATE_VDEV_RATE_STATS_EVENTID,
  32178. ++ WMI_TLV_DIAG_EVENTID,
  32179. ++ WMI_TLV_GPIO_INPUT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_GPIO),
  32180. ++ WMI_TLV_UPLOADH_EVENTID,
  32181. ++ WMI_TLV_CAPTUREH_EVENTID,
  32182. ++ WMI_TLV_RFKILL_STATE_CHANGE_EVENTID,
  32183. ++ WMI_TLV_TDLS_PEER_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_TDLS),
  32184. ++ WMI_TLV_BATCH_SCAN_ENABLED_EVENTID =
  32185. ++ WMI_TLV_EV(WMI_TLV_GRP_LOCATION_SCAN),
  32186. ++ WMI_TLV_BATCH_SCAN_RESULT_EVENTID,
  32187. ++ WMI_TLV_OEM_CAPABILITY_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_OEM),
  32188. ++ WMI_TLV_OEM_MEASUREMENT_REPORT_EVENTID,
  32189. ++ WMI_TLV_OEM_ERROR_REPORT_EVENTID,
  32190. ++ WMI_TLV_NAN_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_NAN),
  32191. ++ WMI_TLV_LPI_RESULT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_LPI),
  32192. ++ WMI_TLV_LPI_STATUS_EVENTID,
  32193. ++ WMI_TLV_LPI_HANDOFF_EVENTID,
  32194. ++ WMI_TLV_EXTSCAN_START_STOP_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_EXTSCAN),
  32195. ++ WMI_TLV_EXTSCAN_OPERATION_EVENTID,
  32196. ++ WMI_TLV_EXTSCAN_TABLE_USAGE_EVENTID,
  32197. ++ WMI_TLV_EXTSCAN_CACHED_RESULTS_EVENTID,
  32198. ++ WMI_TLV_EXTSCAN_WLAN_CHANGE_RESULTS_EVENTID,
  32199. ++ WMI_TLV_EXTSCAN_HOTLIST_MATCH_EVENTID,
  32200. ++ WMI_TLV_EXTSCAN_CAPABILITIES_EVENTID,
  32201. ++ WMI_TLV_MDNS_STATS_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_MDNS_OFL),
  32202. ++ WMI_TLV_SAP_OFL_ADD_STA_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_SAP_OFL),
  32203. ++ WMI_TLV_SAP_OFL_DEL_STA_EVENTID,
  32204. ++};
  32205. ++
  32206. ++enum wmi_tlv_pdev_param {
  32207. ++ WMI_TLV_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
  32208. ++ WMI_TLV_PDEV_PARAM_RX_CHAIN_MASK,
  32209. ++ WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT2G,
  32210. ++ WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT5G,
  32211. ++ WMI_TLV_PDEV_PARAM_TXPOWER_SCALE,
  32212. ++ WMI_TLV_PDEV_PARAM_BEACON_GEN_MODE,
  32213. ++ WMI_TLV_PDEV_PARAM_BEACON_TX_MODE,
  32214. ++ WMI_TLV_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
  32215. ++ WMI_TLV_PDEV_PARAM_PROTECTION_MODE,
  32216. ++ WMI_TLV_PDEV_PARAM_DYNAMIC_BW,
  32217. ++ WMI_TLV_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
  32218. ++ WMI_TLV_PDEV_PARAM_AGG_SW_RETRY_TH,
  32219. ++ WMI_TLV_PDEV_PARAM_STA_KICKOUT_TH,
  32220. ++ WMI_TLV_PDEV_PARAM_AC_AGGRSIZE_SCALING,
  32221. ++ WMI_TLV_PDEV_PARAM_LTR_ENABLE,
  32222. ++ WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BE,
  32223. ++ WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BK,
  32224. ++ WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VI,
  32225. ++ WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VO,
  32226. ++ WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
  32227. ++ WMI_TLV_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
  32228. ++ WMI_TLV_PDEV_PARAM_LTR_RX_OVERRIDE,
  32229. ++ WMI_TLV_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
  32230. ++ WMI_TLV_PDEV_PARAM_L1SS_ENABLE,
  32231. ++ WMI_TLV_PDEV_PARAM_DSLEEP_ENABLE,
  32232. ++ WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
  32233. ++ WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
  32234. ++ WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
  32235. ++ WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
  32236. ++ WMI_TLV_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
  32237. ++ WMI_TLV_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
  32238. ++ WMI_TLV_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
  32239. ++ WMI_TLV_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
  32240. ++ WMI_TLV_PDEV_PARAM_PMF_QOS,
  32241. ++ WMI_TLV_PDEV_PARAM_ARP_AC_OVERRIDE,
  32242. ++ WMI_TLV_PDEV_PARAM_DCS,
  32243. ++ WMI_TLV_PDEV_PARAM_ANI_ENABLE,
  32244. ++ WMI_TLV_PDEV_PARAM_ANI_POLL_PERIOD,
  32245. ++ WMI_TLV_PDEV_PARAM_ANI_LISTEN_PERIOD,
  32246. ++ WMI_TLV_PDEV_PARAM_ANI_OFDM_LEVEL,
  32247. ++ WMI_TLV_PDEV_PARAM_ANI_CCK_LEVEL,
  32248. ++ WMI_TLV_PDEV_PARAM_DYNTXCHAIN,
  32249. ++ WMI_TLV_PDEV_PARAM_PROXY_STA,
  32250. ++ WMI_TLV_PDEV_PARAM_IDLE_PS_CONFIG,
  32251. ++ WMI_TLV_PDEV_PARAM_POWER_GATING_SLEEP,
  32252. ++ WMI_TLV_PDEV_PARAM_RFKILL_ENABLE,
  32253. ++ WMI_TLV_PDEV_PARAM_BURST_DUR,
  32254. ++ WMI_TLV_PDEV_PARAM_BURST_ENABLE,
  32255. ++ WMI_TLV_PDEV_PARAM_HW_RFKILL_CONFIG,
  32256. ++ WMI_TLV_PDEV_PARAM_LOW_POWER_RF_ENABLE,
  32257. ++ WMI_TLV_PDEV_PARAM_L1SS_TRACK,
  32258. ++ WMI_TLV_PDEV_PARAM_HYST_EN,
  32259. ++ WMI_TLV_PDEV_PARAM_POWER_COLLAPSE_ENABLE,
  32260. ++ WMI_TLV_PDEV_PARAM_LED_SYS_STATE,
  32261. ++ WMI_TLV_PDEV_PARAM_LED_ENABLE,
  32262. ++ WMI_TLV_PDEV_PARAM_AUDIO_OVER_WLAN_LATENCY,
  32263. ++ WMI_TLV_PDEV_PARAM_AUDIO_OVER_WLAN_ENABLE,
  32264. ++ WMI_TLV_PDEV_PARAM_WHAL_MIB_STATS_UPDATE_ENABLE,
  32265. ++ WMI_TLV_PDEV_PARAM_VDEV_RATE_STATS_UPDATE_PERIOD,
  32266. ++ WMI_TLV_PDEV_PARAM_TXPOWER_REASON_NONE,
  32267. ++ WMI_TLV_PDEV_PARAM_TXPOWER_REASON_SAR,
  32268. ++ WMI_TLV_PDEV_PARAM_TXPOWER_REASON_MAX,
  32269. ++};
  32270. ++
  32271. ++enum wmi_tlv_vdev_param {
  32272. ++ WMI_TLV_VDEV_PARAM_RTS_THRESHOLD = 0x1,
  32273. ++ WMI_TLV_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
  32274. ++ WMI_TLV_VDEV_PARAM_BEACON_INTERVAL,
  32275. ++ WMI_TLV_VDEV_PARAM_LISTEN_INTERVAL,
  32276. ++ WMI_TLV_VDEV_PARAM_MULTICAST_RATE,
  32277. ++ WMI_TLV_VDEV_PARAM_MGMT_TX_RATE,
  32278. ++ WMI_TLV_VDEV_PARAM_SLOT_TIME,
  32279. ++ WMI_TLV_VDEV_PARAM_PREAMBLE,
  32280. ++ WMI_TLV_VDEV_PARAM_SWBA_TIME,
  32281. ++ WMI_TLV_VDEV_STATS_UPDATE_PERIOD,
  32282. ++ WMI_TLV_VDEV_PWRSAVE_AGEOUT_TIME,
  32283. ++ WMI_TLV_VDEV_HOST_SWBA_INTERVAL,
  32284. ++ WMI_TLV_VDEV_PARAM_DTIM_PERIOD,
  32285. ++ WMI_TLV_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
  32286. ++ WMI_TLV_VDEV_PARAM_WDS,
  32287. ++ WMI_TLV_VDEV_PARAM_ATIM_WINDOW,
  32288. ++ WMI_TLV_VDEV_PARAM_BMISS_COUNT_MAX,
  32289. ++ WMI_TLV_VDEV_PARAM_BMISS_FIRST_BCNT,
  32290. ++ WMI_TLV_VDEV_PARAM_BMISS_FINAL_BCNT,
  32291. ++ WMI_TLV_VDEV_PARAM_FEATURE_WMM,
  32292. ++ WMI_TLV_VDEV_PARAM_CHWIDTH,
  32293. ++ WMI_TLV_VDEV_PARAM_CHEXTOFFSET,
  32294. ++ WMI_TLV_VDEV_PARAM_DISABLE_HTPROTECTION,
  32295. ++ WMI_TLV_VDEV_PARAM_STA_QUICKKICKOUT,
  32296. ++ WMI_TLV_VDEV_PARAM_MGMT_RATE,
  32297. ++ WMI_TLV_VDEV_PARAM_PROTECTION_MODE,
  32298. ++ WMI_TLV_VDEV_PARAM_FIXED_RATE,
  32299. ++ WMI_TLV_VDEV_PARAM_SGI,
  32300. ++ WMI_TLV_VDEV_PARAM_LDPC,
  32301. ++ WMI_TLV_VDEV_PARAM_TX_STBC,
  32302. ++ WMI_TLV_VDEV_PARAM_RX_STBC,
  32303. ++ WMI_TLV_VDEV_PARAM_INTRA_BSS_FWD,
  32304. ++ WMI_TLV_VDEV_PARAM_DEF_KEYID,
  32305. ++ WMI_TLV_VDEV_PARAM_NSS,
  32306. ++ WMI_TLV_VDEV_PARAM_BCAST_DATA_RATE,
  32307. ++ WMI_TLV_VDEV_PARAM_MCAST_DATA_RATE,
  32308. ++ WMI_TLV_VDEV_PARAM_MCAST_INDICATE,
  32309. ++ WMI_TLV_VDEV_PARAM_DHCP_INDICATE,
  32310. ++ WMI_TLV_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
  32311. ++ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
  32312. ++ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
  32313. ++ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
  32314. ++ WMI_TLV_VDEV_PARAM_AP_ENABLE_NAWDS,
  32315. ++ WMI_TLV_VDEV_PARAM_ENABLE_RTSCTS,
  32316. ++ WMI_TLV_VDEV_PARAM_TXBF,
  32317. ++ WMI_TLV_VDEV_PARAM_PACKET_POWERSAVE,
  32318. ++ WMI_TLV_VDEV_PARAM_DROP_UNENCRY,
  32319. ++ WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE,
  32320. ++ WMI_TLV_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
  32321. ++ WMI_TLV_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
  32322. ++ WMI_TLV_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
  32323. ++ WMI_TLV_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
  32324. ++ WMI_TLV_VDEV_PARAM_EARLY_RX_SLOP_STEP,
  32325. ++ WMI_TLV_VDEV_PARAM_EARLY_RX_INIT_SLOP,
  32326. ++ WMI_TLV_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
  32327. ++ WMI_TLV_VDEV_PARAM_TX_PWRLIMIT,
  32328. ++ WMI_TLV_VDEV_PARAM_SNR_NUM_FOR_CAL,
  32329. ++ WMI_TLV_VDEV_PARAM_ROAM_FW_OFFLOAD,
  32330. ++ WMI_TLV_VDEV_PARAM_ENABLE_RMC,
  32331. ++ WMI_TLV_VDEV_PARAM_IBSS_MAX_BCN_LOST_MS,
  32332. ++ WMI_TLV_VDEV_PARAM_MAX_RATE,
  32333. ++ WMI_TLV_VDEV_PARAM_EARLY_RX_DRIFT_SAMPLE,
  32334. ++ WMI_TLV_VDEV_PARAM_SET_IBSS_TX_FAIL_CNT_THR,
  32335. ++ WMI_TLV_VDEV_PARAM_EBT_RESYNC_TIMEOUT,
  32336. ++ WMI_TLV_VDEV_PARAM_AGGR_TRIG_EVENT_ENABLE,
  32337. ++ WMI_TLV_VDEV_PARAM_IS_IBSS_POWER_SAVE_ALLOWED,
  32338. ++ WMI_TLV_VDEV_PARAM_IS_POWER_COLLAPSE_ALLOWED,
  32339. ++ WMI_TLV_VDEV_PARAM_IS_AWAKE_ON_TXRX_ENABLED,
  32340. ++ WMI_TLV_VDEV_PARAM_INACTIVITY_CNT,
  32341. ++ WMI_TLV_VDEV_PARAM_TXSP_END_INACTIVITY_TIME_MS,
  32342. ++ WMI_TLV_VDEV_PARAM_DTIM_POLICY,
  32343. ++ WMI_TLV_VDEV_PARAM_IBSS_PS_WARMUP_TIME_SECS,
  32344. ++ WMI_TLV_VDEV_PARAM_IBSS_PS_1RX_CHAIN_IN_ATIM_WINDOW_ENABLE,
  32345. ++};
  32346. ++
  32347. ++enum wmi_tlv_tag {
  32348. ++ WMI_TLV_TAG_LAST_RESERVED = 15,
  32349. ++
  32350. ++ WMI_TLV_TAG_FIRST_ARRAY_ENUM,
  32351. ++ WMI_TLV_TAG_ARRAY_UINT32 = WMI_TLV_TAG_FIRST_ARRAY_ENUM,
  32352. ++ WMI_TLV_TAG_ARRAY_BYTE,
  32353. ++ WMI_TLV_TAG_ARRAY_STRUCT,
  32354. ++ WMI_TLV_TAG_ARRAY_FIXED_STRUCT,
  32355. ++ WMI_TLV_TAG_LAST_ARRAY_ENUM = 31,
  32356. ++
  32357. ++ WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT,
  32358. ++ WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES,
  32359. ++ WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ,
  32360. ++ WMI_TLV_TAG_STRUCT_READY_EVENT,
  32361. ++ WMI_TLV_TAG_STRUCT_SCAN_EVENT,
  32362. ++ WMI_TLV_TAG_STRUCT_PDEV_TPC_CONFIG_EVENT,
  32363. ++ WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT,
  32364. ++ WMI_TLV_TAG_STRUCT_COMB_PHYERR_RX_HDR,
  32365. ++ WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT,
  32366. ++ WMI_TLV_TAG_STRUCT_VDEV_STOPPED_EVENT,
  32367. ++ WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_COMPLETE_EVENT,
  32368. ++ WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT,
  32369. ++ WMI_TLV_TAG_STRUCT_MGMT_RX_HDR,
  32370. ++ WMI_TLV_TAG_STRUCT_TBTT_OFFSET_EVENT,
  32371. ++ WMI_TLV_TAG_STRUCT_TX_DELBA_COMPLETE_EVENT,
  32372. ++ WMI_TLV_TAG_STRUCT_TX_ADDBA_COMPLETE_EVENT,
  32373. ++ WMI_TLV_TAG_STRUCT_ROAM_EVENT,
  32374. ++ WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO,
  32375. ++ WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO_SECTION_BITMAP,
  32376. ++ WMI_TLV_TAG_STRUCT_RTT_EVENT_HEADER,
  32377. ++ WMI_TLV_TAG_STRUCT_RTT_ERROR_REPORT_EVENT,
  32378. ++ WMI_TLV_TAG_STRUCT_RTT_MEAS_EVENT,
  32379. ++ WMI_TLV_TAG_STRUCT_ECHO_EVENT,
  32380. ++ WMI_TLV_TAG_STRUCT_FTM_INTG_EVENT,
  32381. ++ WMI_TLV_TAG_STRUCT_VDEV_GET_KEEPALIVE_EVENT,
  32382. ++ WMI_TLV_TAG_STRUCT_GPIO_INPUT_EVENT,
  32383. ++ WMI_TLV_TAG_STRUCT_CSA_EVENT,
  32384. ++ WMI_TLV_TAG_STRUCT_GTK_OFFLOAD_STATUS_EVENT,
  32385. ++ WMI_TLV_TAG_STRUCT_IGTK_INFO,
  32386. ++ WMI_TLV_TAG_STRUCT_DCS_INTERFERENCE_EVENT,
  32387. ++ WMI_TLV_TAG_STRUCT_ATH_DCS_CW_INT,
  32388. ++ WMI_TLV_TAG_STRUCT_ATH_DCS_WLAN_INT_STAT,
  32389. ++ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_CTX_T,
  32390. ++ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_T,
  32391. ++ WMI_TLV_TAG_STRUCT_PDEV_QVIT_EVENT,
  32392. ++ WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT,
  32393. ++ WMI_TLV_TAG_STRUCT_TIM_INFO,
  32394. ++ WMI_TLV_TAG_STRUCT_P2P_NOA_INFO,
  32395. ++ WMI_TLV_TAG_STRUCT_STATS_EVENT,
  32396. ++ WMI_TLV_TAG_STRUCT_AVOID_FREQ_RANGES_EVENT,
  32397. ++ WMI_TLV_TAG_STRUCT_AVOID_FREQ_RANGE_DESC,
  32398. ++ WMI_TLV_TAG_STRUCT_GTK_REKEY_FAIL_EVENT,
  32399. ++ WMI_TLV_TAG_STRUCT_INIT_CMD,
  32400. ++ WMI_TLV_TAG_STRUCT_RESOURCE_CONFIG,
  32401. ++ WMI_TLV_TAG_STRUCT_WLAN_HOST_MEMORY_CHUNK,
  32402. ++ WMI_TLV_TAG_STRUCT_START_SCAN_CMD,
  32403. ++ WMI_TLV_TAG_STRUCT_STOP_SCAN_CMD,
  32404. ++ WMI_TLV_TAG_STRUCT_SCAN_CHAN_LIST_CMD,
  32405. ++ WMI_TLV_TAG_STRUCT_CHANNEL,
  32406. ++ WMI_TLV_TAG_STRUCT_PDEV_SET_REGDOMAIN_CMD,
  32407. ++ WMI_TLV_TAG_STRUCT_PDEV_SET_PARAM_CMD,
  32408. ++ WMI_TLV_TAG_STRUCT_PDEV_SET_WMM_PARAMS_CMD,
  32409. ++ WMI_TLV_TAG_STRUCT_WMM_PARAMS,
  32410. ++ WMI_TLV_TAG_STRUCT_PDEV_SET_QUIET_CMD,
  32411. ++ WMI_TLV_TAG_STRUCT_VDEV_CREATE_CMD,
  32412. ++ WMI_TLV_TAG_STRUCT_VDEV_DELETE_CMD,
  32413. ++ WMI_TLV_TAG_STRUCT_VDEV_START_REQUEST_CMD,
  32414. ++ WMI_TLV_TAG_STRUCT_P2P_NOA_DESCRIPTOR,
  32415. ++ WMI_TLV_TAG_STRUCT_P2P_GO_SET_BEACON_IE,
  32416. ++ WMI_TLV_TAG_STRUCT_GTK_OFFLOAD_CMD,
  32417. ++ WMI_TLV_TAG_STRUCT_VDEV_UP_CMD,
  32418. ++ WMI_TLV_TAG_STRUCT_VDEV_STOP_CMD,
  32419. ++ WMI_TLV_TAG_STRUCT_VDEV_DOWN_CMD,
  32420. ++ WMI_TLV_TAG_STRUCT_VDEV_SET_PARAM_CMD,
  32421. ++ WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_CMD,
  32422. ++ WMI_TLV_TAG_STRUCT_PEER_CREATE_CMD,
  32423. ++ WMI_TLV_TAG_STRUCT_PEER_DELETE_CMD,
  32424. ++ WMI_TLV_TAG_STRUCT_PEER_FLUSH_TIDS_CMD,
  32425. ++ WMI_TLV_TAG_STRUCT_PEER_SET_PARAM_CMD,
  32426. ++ WMI_TLV_TAG_STRUCT_PEER_ASSOC_COMPLETE_CMD,
  32427. ++ WMI_TLV_TAG_STRUCT_VHT_RATE_SET,
  32428. ++ WMI_TLV_TAG_STRUCT_BCN_TMPL_CMD,
  32429. ++ WMI_TLV_TAG_STRUCT_PRB_TMPL_CMD,
  32430. ++ WMI_TLV_TAG_STRUCT_BCN_PRB_INFO,
  32431. ++ WMI_TLV_TAG_STRUCT_PEER_TID_ADDBA_CMD,
  32432. ++ WMI_TLV_TAG_STRUCT_PEER_TID_DELBA_CMD,
  32433. ++ WMI_TLV_TAG_STRUCT_STA_POWERSAVE_MODE_CMD,
  32434. ++ WMI_TLV_TAG_STRUCT_STA_POWERSAVE_PARAM_CMD,
  32435. ++ WMI_TLV_TAG_STRUCT_STA_DTIM_PS_METHOD_CMD,
  32436. ++ WMI_TLV_TAG_STRUCT_ROAM_SCAN_MODE,
  32437. ++ WMI_TLV_TAG_STRUCT_ROAM_SCAN_RSSI_THRESHOLD,
  32438. ++ WMI_TLV_TAG_STRUCT_ROAM_SCAN_PERIOD,
  32439. ++ WMI_TLV_TAG_STRUCT_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
  32440. ++ WMI_TLV_TAG_STRUCT_PDEV_SUSPEND_CMD,
  32441. ++ WMI_TLV_TAG_STRUCT_PDEV_RESUME_CMD,
  32442. ++ WMI_TLV_TAG_STRUCT_ADD_BCN_FILTER_CMD,
  32443. ++ WMI_TLV_TAG_STRUCT_RMV_BCN_FILTER_CMD,
  32444. ++ WMI_TLV_TAG_STRUCT_WOW_ENABLE_CMD,
  32445. ++ WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_FROM_SLEEP_CMD,
  32446. ++ WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_CMD,
  32447. ++ WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_PARAM,
  32448. ++ WMI_TLV_TAG_STRUCT_SET_ARP_NS_OFFLOAD_CMD,
  32449. ++ WMI_TLV_TAG_STRUCT_ARP_OFFLOAD_TUPLE,
  32450. ++ WMI_TLV_TAG_STRUCT_NS_OFFLOAD_TUPLE,
  32451. ++ WMI_TLV_TAG_STRUCT_FTM_INTG_CMD,
  32452. ++ WMI_TLV_TAG_STRUCT_STA_KEEPALIVE_CMD,
  32453. ++ WMI_TLV_TAG_STRUCT_STA_KEEPALVE_ARP_RESPONSE,
  32454. ++ WMI_TLV_TAG_STRUCT_P2P_SET_VENDOR_IE_DATA_CMD,
  32455. ++ WMI_TLV_TAG_STRUCT_AP_PS_PEER_CMD,
  32456. ++ WMI_TLV_TAG_STRUCT_PEER_RATE_RETRY_SCHED_CMD,
  32457. ++ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_TRIGGER_CMD,
  32458. ++ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_SET_HIST_INTVL_CMD,
  32459. ++ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_GET_PROF_DATA_CMD,
  32460. ++ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_ENABLE_PROFILE_ID_CMD,
  32461. ++ WMI_TLV_TAG_STRUCT_WOW_DEL_PATTERN_CMD,
  32462. ++ WMI_TLV_TAG_STRUCT_WOW_ADD_DEL_EVT_CMD,
  32463. ++ WMI_TLV_TAG_STRUCT_RTT_MEASREQ_HEAD,
  32464. ++ WMI_TLV_TAG_STRUCT_RTT_MEASREQ_BODY,
  32465. ++ WMI_TLV_TAG_STRUCT_RTT_TSF_CMD,
  32466. ++ WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_CONFIGURE_CMD,
  32467. ++ WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_ENABLE_CMD,
  32468. ++ WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD,
  32469. ++ WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD,
  32470. ++ WMI_TLV_TAG_STRUCT_NLO_CONFIGURED_PARAMETERS,
  32471. ++ WMI_TLV_TAG_STRUCT_CSA_OFFLOAD_ENABLE_CMD,
  32472. ++ WMI_TLV_TAG_STRUCT_CSA_OFFLOAD_CHANSWITCH_CMD,
  32473. ++ WMI_TLV_TAG_STRUCT_CHATTER_SET_MODE_CMD,
  32474. ++ WMI_TLV_TAG_STRUCT_ECHO_CMD,
  32475. ++ WMI_TLV_TAG_STRUCT_VDEV_SET_KEEPALIVE_CMD,
  32476. ++ WMI_TLV_TAG_STRUCT_VDEV_GET_KEEPALIVE_CMD,
  32477. ++ WMI_TLV_TAG_STRUCT_FORCE_FW_HANG_CMD,
  32478. ++ WMI_TLV_TAG_STRUCT_GPIO_CONFIG_CMD,
  32479. ++ WMI_TLV_TAG_STRUCT_GPIO_OUTPUT_CMD,
  32480. ++ WMI_TLV_TAG_STRUCT_PEER_ADD_WDS_ENTRY_CMD,
  32481. ++ WMI_TLV_TAG_STRUCT_PEER_REMOVE_WDS_ENTRY_CMD,
  32482. ++ WMI_TLV_TAG_STRUCT_BCN_TX_HDR,
  32483. ++ WMI_TLV_TAG_STRUCT_BCN_SEND_FROM_HOST_CMD,
  32484. ++ WMI_TLV_TAG_STRUCT_MGMT_TX_HDR,
  32485. ++ WMI_TLV_TAG_STRUCT_ADDBA_CLEAR_RESP_CMD,
  32486. ++ WMI_TLV_TAG_STRUCT_ADDBA_SEND_CMD,
  32487. ++ WMI_TLV_TAG_STRUCT_DELBA_SEND_CMD,
  32488. ++ WMI_TLV_TAG_STRUCT_ADDBA_SETRESPONSE_CMD,
  32489. ++ WMI_TLV_TAG_STRUCT_SEND_SINGLEAMSDU_CMD,
  32490. ++ WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_ENABLE_CMD,
  32491. ++ WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_DISABLE_CMD,
  32492. ++ WMI_TLV_TAG_STRUCT_PDEV_SET_HT_IE_CMD,
  32493. ++ WMI_TLV_TAG_STRUCT_PDEV_SET_VHT_IE_CMD,
  32494. ++ WMI_TLV_TAG_STRUCT_PDEV_SET_DSCP_TID_MAP_CMD,
  32495. ++ WMI_TLV_TAG_STRUCT_PDEV_GREEN_AP_PS_ENABLE_CMD,
  32496. ++ WMI_TLV_TAG_STRUCT_PDEV_GET_TPC_CONFIG_CMD,
  32497. ++ WMI_TLV_TAG_STRUCT_PDEV_SET_BASE_MACADDR_CMD,
  32498. ++ WMI_TLV_TAG_STRUCT_PEER_MCAST_GROUP_CMD,
  32499. ++ WMI_TLV_TAG_STRUCT_ROAM_AP_PROFILE,
  32500. ++ WMI_TLV_TAG_STRUCT_AP_PROFILE,
  32501. ++ WMI_TLV_TAG_STRUCT_SCAN_SCH_PRIORITY_TABLE_CMD,
  32502. ++ WMI_TLV_TAG_STRUCT_PDEV_DFS_ENABLE_CMD,
  32503. ++ WMI_TLV_TAG_STRUCT_PDEV_DFS_DISABLE_CMD,
  32504. ++ WMI_TLV_TAG_STRUCT_WOW_ADD_PATTERN_CMD,
  32505. ++ WMI_TLV_TAG_STRUCT_WOW_BITMAP_PATTERN_T,
  32506. ++ WMI_TLV_TAG_STRUCT_WOW_IPV4_SYNC_PATTERN_T,
  32507. ++ WMI_TLV_TAG_STRUCT_WOW_IPV6_SYNC_PATTERN_T,
  32508. ++ WMI_TLV_TAG_STRUCT_WOW_MAGIC_PATTERN_CMD,
  32509. ++ WMI_TLV_TAG_STRUCT_SCAN_UPDATE_REQUEST_CMD,
  32510. ++ WMI_TLV_TAG_STRUCT_CHATTER_PKT_COALESCING_FILTER,
  32511. ++ WMI_TLV_TAG_STRUCT_CHATTER_COALESCING_ADD_FILTER_CMD,
  32512. ++ WMI_TLV_TAG_STRUCT_CHATTER_COALESCING_DELETE_FILTER_CMD,
  32513. ++ WMI_TLV_TAG_STRUCT_CHATTER_COALESCING_QUERY_CMD,
  32514. ++ WMI_TLV_TAG_STRUCT_TXBF_CMD,
  32515. ++ WMI_TLV_TAG_STRUCT_DEBUG_LOG_CONFIG_CMD,
  32516. ++ WMI_TLV_TAG_STRUCT_NLO_EVENT,
  32517. ++ WMI_TLV_TAG_STRUCT_CHATTER_QUERY_REPLY_EVENT,
  32518. ++ WMI_TLV_TAG_STRUCT_UPLOAD_H_HDR,
  32519. ++ WMI_TLV_TAG_STRUCT_CAPTURE_H_EVENT_HDR,
  32520. ++ WMI_TLV_TAG_STRUCT_VDEV_WNM_SLEEPMODE_CMD,
  32521. ++ WMI_TLV_TAG_STRUCT_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMD,
  32522. ++ WMI_TLV_TAG_STRUCT_VDEV_WMM_ADDTS_CMD,
  32523. ++ WMI_TLV_TAG_STRUCT_VDEV_WMM_DELTS_CMD,
  32524. ++ WMI_TLV_TAG_STRUCT_VDEV_SET_WMM_PARAMS_CMD,
  32525. ++ WMI_TLV_TAG_STRUCT_TDLS_SET_STATE_CMD,
  32526. ++ WMI_TLV_TAG_STRUCT_TDLS_PEER_UPDATE_CMD,
  32527. ++ WMI_TLV_TAG_STRUCT_TDLS_PEER_EVENT,
  32528. ++ WMI_TLV_TAG_STRUCT_TDLS_PEER_CAPABILITIES,
  32529. ++ WMI_TLV_TAG_STRUCT_VDEV_MCC_SET_TBTT_MODE_CMD,
  32530. ++ WMI_TLV_TAG_STRUCT_ROAM_CHAN_LIST,
  32531. ++ WMI_TLV_TAG_STRUCT_VDEV_MCC_BCN_INTVL_CHANGE_EVENT,
  32532. ++ WMI_TLV_TAG_STRUCT_RESMGR_ADAPTIVE_OCS_CMD,
  32533. ++ WMI_TLV_TAG_STRUCT_RESMGR_SET_CHAN_TIME_QUOTA_CMD,
  32534. ++ WMI_TLV_TAG_STRUCT_RESMGR_SET_CHAN_LATENCY_CMD,
  32535. ++ WMI_TLV_TAG_STRUCT_BA_REQ_SSN_CMD,
  32536. ++ WMI_TLV_TAG_STRUCT_BA_RSP_SSN_EVENT,
  32537. ++ WMI_TLV_TAG_STRUCT_STA_SMPS_FORCE_MODE_CMD,
  32538. ++ WMI_TLV_TAG_STRUCT_SET_MCASTBCAST_FILTER_CMD,
  32539. ++ WMI_TLV_TAG_STRUCT_P2P_SET_OPPPS_CMD,
  32540. ++ WMI_TLV_TAG_STRUCT_P2P_SET_NOA_CMD,
  32541. ++ WMI_TLV_TAG_STRUCT_BA_REQ_SSN_CMD_SUB_STRUCT_PARAM,
  32542. ++ WMI_TLV_TAG_STRUCT_BA_REQ_SSN_EVENT_SUB_STRUCT_PARAM,
  32543. ++ WMI_TLV_TAG_STRUCT_STA_SMPS_PARAM_CMD,
  32544. ++ WMI_TLV_TAG_STRUCT_VDEV_SET_GTX_PARAMS_CMD,
  32545. ++ WMI_TLV_TAG_STRUCT_MCC_SCHED_TRAFFIC_STATS_CMD,
  32546. ++ WMI_TLV_TAG_STRUCT_MCC_SCHED_STA_TRAFFIC_STATS,
  32547. ++ WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT,
  32548. ++ WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT,
  32549. ++ WMI_TLV_TAG_STRUCT_HB_SET_ENABLE_CMD,
  32550. ++ WMI_TLV_TAG_STRUCT_HB_SET_TCP_PARAMS_CMD,
  32551. ++ WMI_TLV_TAG_STRUCT_HB_SET_TCP_PKT_FILTER_CMD,
  32552. ++ WMI_TLV_TAG_STRUCT_HB_SET_UDP_PARAMS_CMD,
  32553. ++ WMI_TLV_TAG_STRUCT_HB_SET_UDP_PKT_FILTER_CMD,
  32554. ++ WMI_TLV_TAG_STRUCT_HB_IND_EVENT,
  32555. ++ WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT,
  32556. ++ WMI_TLV_TAG_STRUCT_RFKILL_EVENT,
  32557. ++ WMI_TLV_TAG_STRUCT_DFS_RADAR_EVENT,
  32558. ++ WMI_TLV_TAG_STRUCT_DFS_PHYERR_FILTER_ENA_CMD,
  32559. ++ WMI_TLV_TAG_STRUCT_DFS_PHYERR_FILTER_DIS_CMD,
  32560. ++ WMI_TLV_TAG_STRUCT_BATCH_SCAN_RESULT_SCAN_LIST,
  32561. ++ WMI_TLV_TAG_STRUCT_BATCH_SCAN_RESULT_NETWORK_INFO,
  32562. ++ WMI_TLV_TAG_STRUCT_BATCH_SCAN_ENABLE_CMD,
  32563. ++ WMI_TLV_TAG_STRUCT_BATCH_SCAN_DISABLE_CMD,
  32564. ++ WMI_TLV_TAG_STRUCT_BATCH_SCAN_TRIGGER_RESULT_CMD,
  32565. ++ WMI_TLV_TAG_STRUCT_BATCH_SCAN_ENABLED_EVENT,
  32566. ++ WMI_TLV_TAG_STRUCT_BATCH_SCAN_RESULT_EVENT,
  32567. ++ WMI_TLV_TAG_STRUCT_VDEV_PLMREQ_START_CMD,
  32568. ++ WMI_TLV_TAG_STRUCT_VDEV_PLMREQ_STOP_CMD,
  32569. ++ WMI_TLV_TAG_STRUCT_THERMAL_MGMT_CMD,
  32570. ++ WMI_TLV_TAG_STRUCT_THERMAL_MGMT_EVENT,
  32571. ++ WMI_TLV_TAG_STRUCT_PEER_INFO_REQ_CMD,
  32572. ++ WMI_TLV_TAG_STRUCT_PEER_INFO_EVENT,
  32573. ++ WMI_TLV_TAG_STRUCT_PEER_INFO,
  32574. ++ WMI_TLV_TAG_STRUCT_PEER_TX_FAIL_CNT_THR_EVENT,
  32575. ++ WMI_TLV_TAG_STRUCT_RMC_SET_MODE_CMD,
  32576. ++ WMI_TLV_TAG_STRUCT_RMC_SET_ACTION_PERIOD_CMD,
  32577. ++ WMI_TLV_TAG_STRUCT_RMC_CONFIG_CMD,
  32578. ++ WMI_TLV_TAG_STRUCT_MHF_OFFLOAD_SET_MODE_CMD,
  32579. ++ WMI_TLV_TAG_STRUCT_MHF_OFFLOAD_PLUMB_ROUTING_TABLE_CMD,
  32580. ++ WMI_TLV_TAG_STRUCT_ADD_PROACTIVE_ARP_RSP_PATTERN_CMD,
  32581. ++ WMI_TLV_TAG_STRUCT_DEL_PROACTIVE_ARP_RSP_PATTERN_CMD,
  32582. ++ WMI_TLV_TAG_STRUCT_NAN_CMD_PARAM,
  32583. ++ WMI_TLV_TAG_STRUCT_NAN_EVENT_HDR,
  32584. ++ WMI_TLV_TAG_STRUCT_PDEV_L1SS_TRACK_EVENT,
  32585. ++ WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT,
  32586. ++ WMI_TLV_TAG_STRUCT_MODEM_POWER_STATE_CMD_PARAM,
  32587. ++ WMI_TLV_TAG_STRUCT_PEER_GET_ESTIMATED_LINKSPEED_CMD,
  32588. ++ WMI_TLV_TAG_STRUCT_PEER_ESTIMATED_LINKSPEED_EVENT,
  32589. ++ WMI_TLV_TAG_STRUCT_AGGR_STATE_TRIG_EVENT,
  32590. ++ WMI_TLV_TAG_STRUCT_MHF_OFFLOAD_ROUTING_TABLE_ENTRY,
  32591. ++ WMI_TLV_TAG_STRUCT_ROAM_SCAN_CMD,
  32592. ++ WMI_TLV_TAG_STRUCT_REQ_STATS_EXT_CMD,
  32593. ++ WMI_TLV_TAG_STRUCT_STATS_EXT_EVENT,
  32594. ++ WMI_TLV_TAG_STRUCT_OBSS_SCAN_ENABLE_CMD,
  32595. ++ WMI_TLV_TAG_STRUCT_OBSS_SCAN_DISABLE_CMD,
  32596. ++ WMI_TLV_TAG_STRUCT_OFFLOAD_PRB_RSP_TX_STATUS_EVENT,
  32597. ++ WMI_TLV_TAG_STRUCT_PDEV_SET_LED_CONFIG_CMD,
  32598. ++ WMI_TLV_TAG_STRUCT_HOST_AUTO_SHUTDOWN_CFG_CMD,
  32599. ++ WMI_TLV_TAG_STRUCT_HOST_AUTO_SHUTDOWN_EVENT,
  32600. ++ WMI_TLV_TAG_STRUCT_UPDATE_WHAL_MIB_STATS_EVENT,
  32601. ++ WMI_TLV_TAG_STRUCT_CHAN_AVOID_UPDATE_CMD_PARAM,
  32602. ++ WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_PKT_PATTERN_T,
  32603. ++ WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_TMR_PATTERN_T,
  32604. ++ WMI_TLV_TAG_STRUCT_WOW_IOAC_ADD_KEEPALIVE_CMD,
  32605. ++ WMI_TLV_TAG_STRUCT_WOW_IOAC_DEL_KEEPALIVE_CMD,
  32606. ++ WMI_TLV_TAG_STRUCT_WOW_IOAC_KEEPALIVE_T,
  32607. ++ WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_ADD_PATTERN_CMD,
  32608. ++ WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_DEL_PATTERN_CMD,
  32609. ++ WMI_TLV_TAG_STRUCT_START_LINK_STATS_CMD,
  32610. ++ WMI_TLV_TAG_STRUCT_CLEAR_LINK_STATS_CMD,
  32611. ++ WMI_TLV_TAG_STRUCT_REQUEST_LINK_STATS_CMD,
  32612. ++ WMI_TLV_TAG_STRUCT_IFACE_LINK_STATS_EVENT,
  32613. ++ WMI_TLV_TAG_STRUCT_RADIO_LINK_STATS_EVENT,
  32614. ++ WMI_TLV_TAG_STRUCT_PEER_STATS_EVENT,
  32615. ++ WMI_TLV_TAG_STRUCT_CHANNEL_STATS,
  32616. ++ WMI_TLV_TAG_STRUCT_RADIO_LINK_STATS,
  32617. ++ WMI_TLV_TAG_STRUCT_RATE_STATS,
  32618. ++ WMI_TLV_TAG_STRUCT_PEER_LINK_STATS,
  32619. ++ WMI_TLV_TAG_STRUCT_WMM_AC_STATS,
  32620. ++ WMI_TLV_TAG_STRUCT_IFACE_LINK_STATS,
  32621. ++ WMI_TLV_TAG_STRUCT_LPI_MGMT_SNOOPING_CONFIG_CMD,
  32622. ++ WMI_TLV_TAG_STRUCT_LPI_START_SCAN_CMD,
  32623. ++ WMI_TLV_TAG_STRUCT_LPI_STOP_SCAN_CMD,
  32624. ++ WMI_TLV_TAG_STRUCT_LPI_RESULT_EVENT,
  32625. ++ WMI_TLV_TAG_STRUCT_PEER_STATE_EVENT,
  32626. ++ WMI_TLV_TAG_STRUCT_EXTSCAN_BUCKET_CMD,
  32627. ++ WMI_TLV_TAG_STRUCT_EXTSCAN_BUCKET_CHANNEL_EVENT,
  32628. ++ WMI_TLV_TAG_STRUCT_EXTSCAN_START_CMD,
  32629. ++ WMI_TLV_TAG_STRUCT_EXTSCAN_STOP_CMD,
  32630. ++ WMI_TLV_TAG_STRUCT_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMD,
  32631. ++ WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_BSSID_PARAM_CMD,
  32632. ++ WMI_TLV_TAG_STRUCT_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMD,
  32633. ++ WMI_TLV_TAG_STRUCT_EXTSCAN_GET_CACHED_RESULTS_CMD,
  32634. ++ WMI_TLV_TAG_STRUCT_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMD,
  32635. ++ WMI_TLV_TAG_STRUCT_EXTSCAN_SET_CAPABILITIES_CMD,
  32636. ++ WMI_TLV_TAG_STRUCT_EXTSCAN_GET_CAPABILITIES_CMD,
  32637. ++ WMI_TLV_TAG_STRUCT_EXTSCAN_OPERATION_EVENT,
  32638. ++ WMI_TLV_TAG_STRUCT_EXTSCAN_START_STOP_EVENT,
  32639. ++ WMI_TLV_TAG_STRUCT_EXTSCAN_TABLE_USAGE_EVENT,
  32640. ++ WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_DESCRIPTOR_EVENT,
  32641. ++ WMI_TLV_TAG_STRUCT_EXTSCAN_RSSI_INFO_EVENT,
  32642. ++ WMI_TLV_TAG_STRUCT_EXTSCAN_CACHED_RESULTS_EVENT,
  32643. ++ WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_RESULTS_EVENT,
  32644. ++ WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_RESULT_BSSID_EVENT,
  32645. ++ WMI_TLV_TAG_STRUCT_EXTSCAN_HOTLIST_MATCH_EVENT,
  32646. ++ WMI_TLV_TAG_STRUCT_EXTSCAN_CAPABILITIES_EVENT,
  32647. ++ WMI_TLV_TAG_STRUCT_EXTSCAN_CACHE_CAPABILITIES_EVENT,
  32648. ++ WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_MONITOR_CAPABILITIES_EVENT,
  32649. ++ WMI_TLV_TAG_STRUCT_EXTSCAN_HOTLIST_MONITOR_CAPABILITIES_EVENT,
  32650. ++ WMI_TLV_TAG_STRUCT_D0_WOW_ENABLE_DISABLE_CMD,
  32651. ++ WMI_TLV_TAG_STRUCT_D0_WOW_DISABLE_ACK_EVENT,
  32652. ++ WMI_TLV_TAG_STRUCT_UNIT_TEST_CMD,
  32653. ++ WMI_TLV_TAG_STRUCT_ROAM_OFFLOAD_TLV_PARAM,
  32654. ++ WMI_TLV_TAG_STRUCT_ROAM_11I_OFFLOAD_TLV_PARAM,
  32655. ++ WMI_TLV_TAG_STRUCT_ROAM_11R_OFFLOAD_TLV_PARAM,
  32656. ++ WMI_TLV_TAG_STRUCT_ROAM_ESE_OFFLOAD_TLV_PARAM,
  32657. ++ WMI_TLV_TAG_STRUCT_ROAM_SYNCH_EVENT,
  32658. ++ WMI_TLV_TAG_STRUCT_ROAM_SYNCH_COMPLETE,
  32659. ++ WMI_TLV_TAG_STRUCT_EXTWOW_ENABLE_CMD,
  32660. ++ WMI_TLV_TAG_STRUCT_EXTWOW_SET_APP_TYPE1_PARAMS_CMD,
  32661. ++ WMI_TLV_TAG_STRUCT_EXTWOW_SET_APP_TYPE2_PARAMS_CMD,
  32662. ++ WMI_TLV_TAG_STRUCT_LPI_STATUS_EVENT,
  32663. ++ WMI_TLV_TAG_STRUCT_LPI_HANDOFF_EVENT,
  32664. ++ WMI_TLV_TAG_STRUCT_VDEV_RATE_STATS_EVENT,
  32665. ++ WMI_TLV_TAG_STRUCT_VDEV_RATE_HT_INFO,
  32666. ++ WMI_TLV_TAG_STRUCT_RIC_REQUEST,
  32667. ++ WMI_TLV_TAG_STRUCT_PDEV_GET_TEMPERATURE_CMD,
  32668. ++ WMI_TLV_TAG_STRUCT_PDEV_TEMPERATURE_EVENT,
  32669. ++ WMI_TLV_TAG_STRUCT_SET_DHCP_SERVER_OFFLOAD_CMD,
  32670. ++ WMI_TLV_TAG_STRUCT_TPC_CHAINMASK_CONFIG_CMD,
  32671. ++ WMI_TLV_TAG_STRUCT_RIC_TSPEC,
  32672. ++ WMI_TLV_TAG_STRUCT_TPC_CHAINMASK_CONFIG,
  32673. ++ WMI_TLV_TAG_STRUCT_IPA_OFFLOAD_CMD,
  32674. ++ WMI_TLV_TAG_STRUCT_SCAN_PROB_REQ_OUI_CMD,
  32675. ++ WMI_TLV_TAG_STRUCT_KEY_MATERIAL,
  32676. ++ WMI_TLV_TAG_STRUCT_TDLS_SET_OFFCHAN_MODE_CMD,
  32677. ++ WMI_TLV_TAG_STRUCT_SET_LED_FLASHING_CMD,
  32678. ++ WMI_TLV_TAG_STRUCT_MDNS_OFFLOAD_CMD,
  32679. ++ WMI_TLV_TAG_STRUCT_MDNS_SET_FQDN_CMD,
  32680. ++ WMI_TLV_TAG_STRUCT_MDNS_SET_RESP_CMD,
  32681. ++ WMI_TLV_TAG_STRUCT_MDNS_GET_STATS_CMD,
  32682. ++ WMI_TLV_TAG_STRUCT_MDNS_STATS_EVENT,
  32683. ++ WMI_TLV_TAG_STRUCT_ROAM_INVOKE_CMD,
  32684. ++ WMI_TLV_TAG_STRUCT_PDEV_RESUME_EVENT,
  32685. ++ WMI_TLV_TAG_STRUCT_PDEV_SET_ANTENNA_DIVERSITY_CMD,
  32686. ++ WMI_TLV_TAG_STRUCT_SAP_OFL_ENABLE_CMD,
  32687. ++ WMI_TLV_TAG_STRUCT_SAP_OFL_ADD_STA_EVENT,
  32688. ++ WMI_TLV_TAG_STRUCT_SAP_OFL_DEL_STA_EVENT,
  32689. ++ WMI_TLV_TAG_STRUCT_APFIND_CMD_PARAM,
  32690. ++ WMI_TLV_TAG_STRUCT_APFIND_EVENT_HDR,
  32691. ++
  32692. ++ WMI_TLV_TAG_MAX
  32693. ++};
  32694. ++
  32695. ++enum wmi_tlv_service {
  32696. ++ WMI_TLV_SERVICE_BEACON_OFFLOAD = 0,
  32697. ++ WMI_TLV_SERVICE_SCAN_OFFLOAD,
  32698. ++ WMI_TLV_SERVICE_ROAM_SCAN_OFFLOAD,
  32699. ++ WMI_TLV_SERVICE_BCN_MISS_OFFLOAD,
  32700. ++ WMI_TLV_SERVICE_STA_PWRSAVE,
  32701. ++ WMI_TLV_SERVICE_STA_ADVANCED_PWRSAVE,
  32702. ++ WMI_TLV_SERVICE_AP_UAPSD,
  32703. ++ WMI_TLV_SERVICE_AP_DFS,
  32704. ++ WMI_TLV_SERVICE_11AC,
  32705. ++ WMI_TLV_SERVICE_BLOCKACK,
  32706. ++ WMI_TLV_SERVICE_PHYERR,
  32707. ++ WMI_TLV_SERVICE_BCN_FILTER,
  32708. ++ WMI_TLV_SERVICE_RTT,
  32709. ++ WMI_TLV_SERVICE_WOW,
  32710. ++ WMI_TLV_SERVICE_RATECTRL_CACHE,
  32711. ++ WMI_TLV_SERVICE_IRAM_TIDS,
  32712. ++ WMI_TLV_SERVICE_ARPNS_OFFLOAD,
  32713. ++ WMI_TLV_SERVICE_NLO,
  32714. ++ WMI_TLV_SERVICE_GTK_OFFLOAD,
  32715. ++ WMI_TLV_SERVICE_SCAN_SCH,
  32716. ++ WMI_TLV_SERVICE_CSA_OFFLOAD,
  32717. ++ WMI_TLV_SERVICE_CHATTER,
  32718. ++ WMI_TLV_SERVICE_COEX_FREQAVOID,
  32719. ++ WMI_TLV_SERVICE_PACKET_POWER_SAVE,
  32720. ++ WMI_TLV_SERVICE_FORCE_FW_HANG,
  32721. ++ WMI_TLV_SERVICE_GPIO,
  32722. ++ WMI_TLV_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
  32723. ++ WMI_TLV_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
  32724. ++ WMI_TLV_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
  32725. ++ WMI_TLV_SERVICE_STA_KEEP_ALIVE,
  32726. ++ WMI_TLV_SERVICE_TX_ENCAP,
  32727. ++ WMI_TLV_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
  32728. ++ WMI_TLV_SERVICE_EARLY_RX,
  32729. ++ WMI_TLV_SERVICE_STA_SMPS,
  32730. ++ WMI_TLV_SERVICE_FWTEST,
  32731. ++ WMI_TLV_SERVICE_STA_WMMAC,
  32732. ++ WMI_TLV_SERVICE_TDLS,
  32733. ++ WMI_TLV_SERVICE_BURST,
  32734. ++ WMI_TLV_SERVICE_MCC_BCN_INTERVAL_CHANGE,
  32735. ++ WMI_TLV_SERVICE_ADAPTIVE_OCS,
  32736. ++ WMI_TLV_SERVICE_BA_SSN_SUPPORT,
  32737. ++ WMI_TLV_SERVICE_FILTER_IPSEC_NATKEEPALIVE,
  32738. ++ WMI_TLV_SERVICE_WLAN_HB,
  32739. ++ WMI_TLV_SERVICE_LTE_ANT_SHARE_SUPPORT,
  32740. ++ WMI_TLV_SERVICE_BATCH_SCAN,
  32741. ++ WMI_TLV_SERVICE_QPOWER,
  32742. ++ WMI_TLV_SERVICE_PLMREQ,
  32743. ++ WMI_TLV_SERVICE_THERMAL_MGMT,
  32744. ++ WMI_TLV_SERVICE_RMC,
  32745. ++ WMI_TLV_SERVICE_MHF_OFFLOAD,
  32746. ++ WMI_TLV_SERVICE_COEX_SAR,
  32747. ++ WMI_TLV_SERVICE_BCN_TXRATE_OVERRIDE,
  32748. ++ WMI_TLV_SERVICE_NAN,
  32749. ++ WMI_TLV_SERVICE_L1SS_STAT,
  32750. ++ WMI_TLV_SERVICE_ESTIMATE_LINKSPEED,
  32751. ++ WMI_TLV_SERVICE_OBSS_SCAN,
  32752. ++ WMI_TLV_SERVICE_TDLS_OFFCHAN,
  32753. ++ WMI_TLV_SERVICE_TDLS_UAPSD_BUFFER_STA,
  32754. ++ WMI_TLV_SERVICE_TDLS_UAPSD_SLEEP_STA,
  32755. ++ WMI_TLV_SERVICE_IBSS_PWRSAVE,
  32756. ++ WMI_TLV_SERVICE_LPASS,
  32757. ++ WMI_TLV_SERVICE_EXTSCAN,
  32758. ++ WMI_TLV_SERVICE_D0WOW,
  32759. ++ WMI_TLV_SERVICE_HSOFFLOAD,
  32760. ++ WMI_TLV_SERVICE_ROAM_HO_OFFLOAD,
  32761. ++ WMI_TLV_SERVICE_RX_FULL_REORDER,
  32762. ++ WMI_TLV_SERVICE_DHCP_OFFLOAD,
  32763. ++ WMI_TLV_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
  32764. ++ WMI_TLV_SERVICE_MDNS_OFFLOAD,
  32765. ++ WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD,
  32766. ++};
  32767. ++
  32768. ++#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
  32769. ++ ((svc_id) < (len) && \
  32770. ++ __le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \
  32771. ++ BIT((svc_id)%(sizeof(u32))))
  32772. ++
  32773. ++#define SVCMAP(x, y, len) \
  32774. ++ do { \
  32775. ++ if (WMI_SERVICE_IS_ENABLED((in), (x), (len))) \
  32776. ++ __set_bit(y, out); \
  32777. ++ } while (0)
  32778. ++
  32779. ++static inline void
  32780. ++wmi_tlv_svc_map(const __le32 *in, unsigned long *out, size_t len)
  32781. ++{
  32782. ++ SVCMAP(WMI_TLV_SERVICE_BEACON_OFFLOAD,
  32783. ++ WMI_SERVICE_BEACON_OFFLOAD, len);
  32784. ++ SVCMAP(WMI_TLV_SERVICE_SCAN_OFFLOAD,
  32785. ++ WMI_SERVICE_SCAN_OFFLOAD, len);
  32786. ++ SVCMAP(WMI_TLV_SERVICE_ROAM_SCAN_OFFLOAD,
  32787. ++ WMI_SERVICE_ROAM_SCAN_OFFLOAD, len);
  32788. ++ SVCMAP(WMI_TLV_SERVICE_BCN_MISS_OFFLOAD,
  32789. ++ WMI_SERVICE_BCN_MISS_OFFLOAD, len);
  32790. ++ SVCMAP(WMI_TLV_SERVICE_STA_PWRSAVE,
  32791. ++ WMI_SERVICE_STA_PWRSAVE, len);
  32792. ++ SVCMAP(WMI_TLV_SERVICE_STA_ADVANCED_PWRSAVE,
  32793. ++ WMI_SERVICE_STA_ADVANCED_PWRSAVE, len);
  32794. ++ SVCMAP(WMI_TLV_SERVICE_AP_UAPSD,
  32795. ++ WMI_SERVICE_AP_UAPSD, len);
  32796. ++ SVCMAP(WMI_TLV_SERVICE_AP_DFS,
  32797. ++ WMI_SERVICE_AP_DFS, len);
  32798. ++ SVCMAP(WMI_TLV_SERVICE_11AC,
  32799. ++ WMI_SERVICE_11AC, len);
  32800. ++ SVCMAP(WMI_TLV_SERVICE_BLOCKACK,
  32801. ++ WMI_SERVICE_BLOCKACK, len);
  32802. ++ SVCMAP(WMI_TLV_SERVICE_PHYERR,
  32803. ++ WMI_SERVICE_PHYERR, len);
  32804. ++ SVCMAP(WMI_TLV_SERVICE_BCN_FILTER,
  32805. ++ WMI_SERVICE_BCN_FILTER, len);
  32806. ++ SVCMAP(WMI_TLV_SERVICE_RTT,
  32807. ++ WMI_SERVICE_RTT, len);
  32808. ++ SVCMAP(WMI_TLV_SERVICE_WOW,
  32809. ++ WMI_SERVICE_WOW, len);
  32810. ++ SVCMAP(WMI_TLV_SERVICE_RATECTRL_CACHE,
  32811. ++ WMI_SERVICE_RATECTRL_CACHE, len);
  32812. ++ SVCMAP(WMI_TLV_SERVICE_IRAM_TIDS,
  32813. ++ WMI_SERVICE_IRAM_TIDS, len);
  32814. ++ SVCMAP(WMI_TLV_SERVICE_ARPNS_OFFLOAD,
  32815. ++ WMI_SERVICE_ARPNS_OFFLOAD, len);
  32816. ++ SVCMAP(WMI_TLV_SERVICE_NLO,
  32817. ++ WMI_SERVICE_NLO, len);
  32818. ++ SVCMAP(WMI_TLV_SERVICE_GTK_OFFLOAD,
  32819. ++ WMI_SERVICE_GTK_OFFLOAD, len);
  32820. ++ SVCMAP(WMI_TLV_SERVICE_SCAN_SCH,
  32821. ++ WMI_SERVICE_SCAN_SCH, len);
  32822. ++ SVCMAP(WMI_TLV_SERVICE_CSA_OFFLOAD,
  32823. ++ WMI_SERVICE_CSA_OFFLOAD, len);
  32824. ++ SVCMAP(WMI_TLV_SERVICE_CHATTER,
  32825. ++ WMI_SERVICE_CHATTER, len);
  32826. ++ SVCMAP(WMI_TLV_SERVICE_COEX_FREQAVOID,
  32827. ++ WMI_SERVICE_COEX_FREQAVOID, len);
  32828. ++ SVCMAP(WMI_TLV_SERVICE_PACKET_POWER_SAVE,
  32829. ++ WMI_SERVICE_PACKET_POWER_SAVE, len);
  32830. ++ SVCMAP(WMI_TLV_SERVICE_FORCE_FW_HANG,
  32831. ++ WMI_SERVICE_FORCE_FW_HANG, len);
  32832. ++ SVCMAP(WMI_TLV_SERVICE_GPIO,
  32833. ++ WMI_SERVICE_GPIO, len);
  32834. ++ SVCMAP(WMI_TLV_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
  32835. ++ WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, len);
  32836. ++ SVCMAP(WMI_TLV_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
  32837. ++ WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, len);
  32838. ++ SVCMAP(WMI_TLV_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
  32839. ++ WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, len);
  32840. ++ SVCMAP(WMI_TLV_SERVICE_STA_KEEP_ALIVE,
  32841. ++ WMI_SERVICE_STA_KEEP_ALIVE, len);
  32842. ++ SVCMAP(WMI_TLV_SERVICE_TX_ENCAP,
  32843. ++ WMI_SERVICE_TX_ENCAP, len);
  32844. ++ SVCMAP(WMI_TLV_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
  32845. ++ WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC, len);
  32846. ++ SVCMAP(WMI_TLV_SERVICE_EARLY_RX,
  32847. ++ WMI_SERVICE_EARLY_RX, len);
  32848. ++ SVCMAP(WMI_TLV_SERVICE_STA_SMPS,
  32849. ++ WMI_SERVICE_STA_SMPS, len);
  32850. ++ SVCMAP(WMI_TLV_SERVICE_FWTEST,
  32851. ++ WMI_SERVICE_FWTEST, len);
  32852. ++ SVCMAP(WMI_TLV_SERVICE_STA_WMMAC,
  32853. ++ WMI_SERVICE_STA_WMMAC, len);
  32854. ++ SVCMAP(WMI_TLV_SERVICE_TDLS,
  32855. ++ WMI_SERVICE_TDLS, len);
  32856. ++ SVCMAP(WMI_TLV_SERVICE_BURST,
  32857. ++ WMI_SERVICE_BURST, len);
  32858. ++ SVCMAP(WMI_TLV_SERVICE_MCC_BCN_INTERVAL_CHANGE,
  32859. ++ WMI_SERVICE_MCC_BCN_INTERVAL_CHANGE, len);
  32860. ++ SVCMAP(WMI_TLV_SERVICE_ADAPTIVE_OCS,
  32861. ++ WMI_SERVICE_ADAPTIVE_OCS, len);
  32862. ++ SVCMAP(WMI_TLV_SERVICE_BA_SSN_SUPPORT,
  32863. ++ WMI_SERVICE_BA_SSN_SUPPORT, len);
  32864. ++ SVCMAP(WMI_TLV_SERVICE_FILTER_IPSEC_NATKEEPALIVE,
  32865. ++ WMI_SERVICE_FILTER_IPSEC_NATKEEPALIVE, len);
  32866. ++ SVCMAP(WMI_TLV_SERVICE_WLAN_HB,
  32867. ++ WMI_SERVICE_WLAN_HB, len);
  32868. ++ SVCMAP(WMI_TLV_SERVICE_LTE_ANT_SHARE_SUPPORT,
  32869. ++ WMI_SERVICE_LTE_ANT_SHARE_SUPPORT, len);
  32870. ++ SVCMAP(WMI_TLV_SERVICE_BATCH_SCAN,
  32871. ++ WMI_SERVICE_BATCH_SCAN, len);
  32872. ++ SVCMAP(WMI_TLV_SERVICE_QPOWER,
  32873. ++ WMI_SERVICE_QPOWER, len);
  32874. ++ SVCMAP(WMI_TLV_SERVICE_PLMREQ,
  32875. ++ WMI_SERVICE_PLMREQ, len);
  32876. ++ SVCMAP(WMI_TLV_SERVICE_THERMAL_MGMT,
  32877. ++ WMI_SERVICE_THERMAL_MGMT, len);
  32878. ++ SVCMAP(WMI_TLV_SERVICE_RMC,
  32879. ++ WMI_SERVICE_RMC, len);
  32880. ++ SVCMAP(WMI_TLV_SERVICE_MHF_OFFLOAD,
  32881. ++ WMI_SERVICE_MHF_OFFLOAD, len);
  32882. ++ SVCMAP(WMI_TLV_SERVICE_COEX_SAR,
  32883. ++ WMI_SERVICE_COEX_SAR, len);
  32884. ++ SVCMAP(WMI_TLV_SERVICE_BCN_TXRATE_OVERRIDE,
  32885. ++ WMI_SERVICE_BCN_TXRATE_OVERRIDE, len);
  32886. ++ SVCMAP(WMI_TLV_SERVICE_NAN,
  32887. ++ WMI_SERVICE_NAN, len);
  32888. ++ SVCMAP(WMI_TLV_SERVICE_L1SS_STAT,
  32889. ++ WMI_SERVICE_L1SS_STAT, len);
  32890. ++ SVCMAP(WMI_TLV_SERVICE_ESTIMATE_LINKSPEED,
  32891. ++ WMI_SERVICE_ESTIMATE_LINKSPEED, len);
  32892. ++ SVCMAP(WMI_TLV_SERVICE_OBSS_SCAN,
  32893. ++ WMI_SERVICE_OBSS_SCAN, len);
  32894. ++ SVCMAP(WMI_TLV_SERVICE_TDLS_OFFCHAN,
  32895. ++ WMI_SERVICE_TDLS_OFFCHAN, len);
  32896. ++ SVCMAP(WMI_TLV_SERVICE_TDLS_UAPSD_BUFFER_STA,
  32897. ++ WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, len);
  32898. ++ SVCMAP(WMI_TLV_SERVICE_TDLS_UAPSD_SLEEP_STA,
  32899. ++ WMI_SERVICE_TDLS_UAPSD_SLEEP_STA, len);
  32900. ++ SVCMAP(WMI_TLV_SERVICE_IBSS_PWRSAVE,
  32901. ++ WMI_SERVICE_IBSS_PWRSAVE, len);
  32902. ++ SVCMAP(WMI_TLV_SERVICE_LPASS,
  32903. ++ WMI_SERVICE_LPASS, len);
  32904. ++ SVCMAP(WMI_TLV_SERVICE_EXTSCAN,
  32905. ++ WMI_SERVICE_EXTSCAN, len);
  32906. ++ SVCMAP(WMI_TLV_SERVICE_D0WOW,
  32907. ++ WMI_SERVICE_D0WOW, len);
  32908. ++ SVCMAP(WMI_TLV_SERVICE_HSOFFLOAD,
  32909. ++ WMI_SERVICE_HSOFFLOAD, len);
  32910. ++ SVCMAP(WMI_TLV_SERVICE_ROAM_HO_OFFLOAD,
  32911. ++ WMI_SERVICE_ROAM_HO_OFFLOAD, len);
  32912. ++ SVCMAP(WMI_TLV_SERVICE_RX_FULL_REORDER,
  32913. ++ WMI_SERVICE_RX_FULL_REORDER, len);
  32914. ++ SVCMAP(WMI_TLV_SERVICE_DHCP_OFFLOAD,
  32915. ++ WMI_SERVICE_DHCP_OFFLOAD, len);
  32916. ++ SVCMAP(WMI_TLV_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
  32917. ++ WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT, len);
  32918. ++ SVCMAP(WMI_TLV_SERVICE_MDNS_OFFLOAD,
  32919. ++ WMI_SERVICE_MDNS_OFFLOAD, len);
  32920. ++ SVCMAP(WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD,
  32921. ++ WMI_SERVICE_SAP_AUTH_OFFLOAD, len);
  32922. ++}
  32923. ++
  32924. ++#undef SVCMAP
  32925. ++
  32926. ++struct wmi_tlv {
  32927. ++ __le16 len;
  32928. ++ __le16 tag;
  32929. ++ u8 value[0];
  32930. ++} __packed;
  32931. ++
  32932. ++#define WMI_TLV_MGMT_RX_NUM_RSSI 4
  32933. ++
  32934. ++struct wmi_tlv_mgmt_rx_ev {
  32935. ++ __le32 channel;
  32936. ++ __le32 snr;
  32937. ++ __le32 rate;
  32938. ++ __le32 phy_mode;
  32939. ++ __le32 buf_len;
  32940. ++ __le32 status;
  32941. ++ __le32 rssi[WMI_TLV_MGMT_RX_NUM_RSSI];
  32942. ++} __packed;
  32943. ++
  32944. ++struct wmi_tlv_abi_version {
  32945. ++ __le32 abi_ver0;
  32946. ++ __le32 abi_ver1;
  32947. ++ __le32 abi_ver_ns0;
  32948. ++ __le32 abi_ver_ns1;
  32949. ++ __le32 abi_ver_ns2;
  32950. ++ __le32 abi_ver_ns3;
  32951. ++} __packed;
  32952. ++
  32953. ++enum wmi_tlv_hw_bd_id {
  32954. ++ WMI_TLV_HW_BD_LEGACY = 0,
  32955. ++ WMI_TLV_HW_BD_QCA6174 = 1,
  32956. ++ WMI_TLV_HW_BD_QCA2582 = 2,
  32957. ++};
  32958. ++
  32959. ++struct wmi_tlv_hw_bd_info {
  32960. ++ u8 rev;
  32961. ++ u8 project_id;
  32962. ++ u8 custom_id;
  32963. ++ u8 reference_design_id;
  32964. ++} __packed;
  32965. ++
  32966. ++struct wmi_tlv_svc_rdy_ev {
  32967. ++ __le32 fw_build_vers;
  32968. ++ struct wmi_tlv_abi_version abi;
  32969. ++ __le32 phy_capability;
  32970. ++ __le32 max_frag_entry;
  32971. ++ __le32 num_rf_chains;
  32972. ++ __le32 ht_cap_info;
  32973. ++ __le32 vht_cap_info;
  32974. ++ __le32 vht_supp_mcs;
  32975. ++ __le32 hw_min_tx_power;
  32976. ++ __le32 hw_max_tx_power;
  32977. ++ __le32 sys_cap_info;
  32978. ++ __le32 min_pkt_size_enable;
  32979. ++ __le32 max_bcn_ie_size;
  32980. ++ __le32 num_mem_reqs;
  32981. ++ __le32 max_num_scan_chans;
  32982. ++ __le32 hw_bd_id; /* 0 means hw_bd_info is invalid */
  32983. ++ struct wmi_tlv_hw_bd_info hw_bd_info[5];
  32984. ++} __packed;
  32985. ++
  32986. ++struct wmi_tlv_rdy_ev {
  32987. ++ struct wmi_tlv_abi_version abi;
  32988. ++ struct wmi_mac_addr mac_addr;
  32989. ++ __le32 status;
  32990. ++} __packed;
  32991. ++
  32992. ++struct wmi_tlv_resource_config {
  32993. ++ __le32 num_vdevs;
  32994. ++ __le32 num_peers;
  32995. ++ __le32 num_offload_peers;
  32996. ++ __le32 num_offload_reorder_bufs;
  32997. ++ __le32 num_peer_keys;
  32998. ++ __le32 num_tids;
  32999. ++ __le32 ast_skid_limit;
  33000. ++ __le32 tx_chain_mask;
  33001. ++ __le32 rx_chain_mask;
  33002. ++ __le32 rx_timeout_pri[4];
  33003. ++ __le32 rx_decap_mode;
  33004. ++ __le32 scan_max_pending_reqs;
  33005. ++ __le32 bmiss_offload_max_vdev;
  33006. ++ __le32 roam_offload_max_vdev;
  33007. ++ __le32 roam_offload_max_ap_profiles;
  33008. ++ __le32 num_mcast_groups;
  33009. ++ __le32 num_mcast_table_elems;
  33010. ++ __le32 mcast2ucast_mode;
  33011. ++ __le32 tx_dbg_log_size;
  33012. ++ __le32 num_wds_entries;
  33013. ++ __le32 dma_burst_size;
  33014. ++ __le32 mac_aggr_delim;
  33015. ++ __le32 rx_skip_defrag_timeout_dup_detection_check;
  33016. ++ __le32 vow_config;
  33017. ++ __le32 gtk_offload_max_vdev;
  33018. ++ __le32 num_msdu_desc;
  33019. ++ __le32 max_frag_entries;
  33020. ++ __le32 num_tdls_vdevs;
  33021. ++ __le32 num_tdls_conn_table_entries;
  33022. ++ __le32 beacon_tx_offload_max_vdev;
  33023. ++ __le32 num_multicast_filter_entries;
  33024. ++ __le32 num_wow_filters;
  33025. ++ __le32 num_keep_alive_pattern;
  33026. ++ __le32 keep_alive_pattern_size;
  33027. ++ __le32 max_tdls_concurrent_sleep_sta;
  33028. ++ __le32 max_tdls_concurrent_buffer_sta;
  33029. ++} __packed;
  33030. ++
  33031. ++struct wmi_tlv_init_cmd {
  33032. ++ struct wmi_tlv_abi_version abi;
  33033. ++ __le32 num_host_mem_chunks;
  33034. ++} __packed;
  33035. ++
  33036. ++struct wmi_tlv_pdev_set_param_cmd {
  33037. ++ __le32 pdev_id; /* not used yet */
  33038. ++ __le32 param_id;
  33039. ++ __le32 param_value;
  33040. ++} __packed;
  33041. ++
  33042. ++struct wmi_tlv_pdev_set_rd_cmd {
  33043. ++ __le32 pdev_id; /* not used yet */
  33044. ++ __le32 regd;
  33045. ++ __le32 regd_2ghz;
  33046. ++ __le32 regd_5ghz;
  33047. ++ __le32 conform_limit_2ghz;
  33048. ++ __le32 conform_limit_5ghz;
  33049. ++} __packed;
  33050. ++
  33051. ++struct wmi_tlv_scan_chan_list_cmd {
  33052. ++ __le32 num_scan_chans;
  33053. ++} __packed;
  33054. ++
  33055. ++struct wmi_tlv_start_scan_cmd {
  33056. ++ struct wmi_start_scan_common common;
  33057. ++ __le32 burst_duration_ms;
  33058. ++ __le32 num_channels;
  33059. ++ __le32 num_bssids;
  33060. ++ __le32 num_ssids;
  33061. ++ __le32 ie_len;
  33062. ++ __le32 num_probes;
  33063. ++} __packed;
  33064. ++
  33065. ++struct wmi_tlv_vdev_start_cmd {
  33066. ++ __le32 vdev_id;
  33067. ++ __le32 requestor_id;
  33068. ++ __le32 bcn_intval;
  33069. ++ __le32 dtim_period;
  33070. ++ __le32 flags;
  33071. ++ struct wmi_ssid ssid;
  33072. ++ __le32 bcn_tx_rate;
  33073. ++ __le32 bcn_tx_power;
  33074. ++ __le32 num_noa_descr;
  33075. ++ __le32 disable_hw_ack;
  33076. ++} __packed;
  33077. ++
  33078. ++enum {
  33079. ++ WMI_TLV_PEER_TYPE_DEFAULT = 0, /* generic / non-BSS / self-peer */
  33080. ++ WMI_TLV_PEER_TYPE_BSS = 1,
  33081. ++ WMI_TLV_PEER_TYPE_TDLS = 2,
  33082. ++ WMI_TLV_PEER_TYPE_HOST_MAX = 127,
  33083. ++ WMI_TLV_PEER_TYPE_ROAMOFFLOAD_TMP = 128,
  33084. ++};
  33085. ++
  33086. ++struct wmi_tlv_peer_create_cmd {
  33087. ++ __le32 vdev_id;
  33088. ++ struct wmi_mac_addr peer_addr;
  33089. ++ __le32 peer_type;
  33090. ++} __packed;
  33091. ++
  33092. ++struct wmi_tlv_peer_assoc_cmd {
  33093. ++ struct wmi_mac_addr mac_addr;
  33094. ++ __le32 vdev_id;
  33095. ++ __le32 new_assoc;
  33096. ++ __le32 assoc_id;
  33097. ++ __le32 flags;
  33098. ++ __le32 caps;
  33099. ++ __le32 listen_intval;
  33100. ++ __le32 ht_caps;
  33101. ++ __le32 max_mpdu;
  33102. ++ __le32 mpdu_density;
  33103. ++ __le32 rate_caps;
  33104. ++ __le32 nss;
  33105. ++ __le32 vht_caps;
  33106. ++ __le32 phy_mode;
  33107. ++ __le32 ht_info[2];
  33108. ++ __le32 num_legacy_rates;
  33109. ++ __le32 num_ht_rates;
  33110. ++} __packed;
  33111. ++
  33112. ++struct wmi_tlv_pdev_suspend {
  33113. ++ __le32 pdev_id; /* not used yet */
  33114. ++ __le32 opt;
  33115. ++} __packed;
  33116. ++
  33117. ++struct wmi_tlv_pdev_set_wmm_cmd {
  33118. ++ __le32 pdev_id; /* not used yet */
  33119. ++ __le32 dg_type; /* no idea.. */
  33120. ++} __packed;
  33121. ++
  33122. ++struct wmi_tlv_vdev_wmm_params {
  33123. ++ __le32 dummy;
  33124. ++ struct wmi_wmm_params params;
  33125. ++} __packed;
  33126. ++
  33127. ++struct wmi_tlv_vdev_set_wmm_cmd {
  33128. ++ __le32 vdev_id;
  33129. ++ struct wmi_tlv_vdev_wmm_params vdev_wmm_params[4];
  33130. ++} __packed;
  33131. ++
  33132. ++struct wmi_tlv_phyerr_ev {
  33133. ++ __le32 num_phyerrs;
  33134. ++ __le32 tsf_l32;
  33135. ++ __le32 tsf_u32;
  33136. ++ __le32 buf_len;
  33137. ++} __packed;
  33138. ++
  33139. ++enum wmi_tlv_dbglog_param {
  33140. ++ WMI_TLV_DBGLOG_PARAM_LOG_LEVEL = 1,
  33141. ++ WMI_TLV_DBGLOG_PARAM_VDEV_ENABLE,
  33142. ++ WMI_TLV_DBGLOG_PARAM_VDEV_DISABLE,
  33143. ++ WMI_TLV_DBGLOG_PARAM_VDEV_ENABLE_BITMAP,
  33144. ++ WMI_TLV_DBGLOG_PARAM_VDEV_DISABLE_BITMAP,
  33145. ++};
  33146. ++
  33147. ++enum wmi_tlv_dbglog_log_level {
  33148. ++ WMI_TLV_DBGLOG_LOG_LEVEL_VERBOSE = 0,
  33149. ++ WMI_TLV_DBGLOG_LOG_LEVEL_INFO,
  33150. ++ WMI_TLV_DBGLOG_LOG_LEVEL_INFO_LVL_1,
  33151. ++ WMI_TLV_DBGLOG_LOG_LEVEL_INFO_LVL_2,
  33152. ++ WMI_TLV_DBGLOG_LOG_LEVEL_WARN,
  33153. ++ WMI_TLV_DBGLOG_LOG_LEVEL_ERR,
  33154. ++};
  33155. ++
  33156. ++#define WMI_TLV_DBGLOG_BITMAP_MAX_IDS 512
  33157. ++#define WMI_TLV_DBGLOG_BITMAP_MAX_WORDS (WMI_TLV_DBGLOG_BITMAP_MAX_IDS / \
  33158. ++ sizeof(__le32))
  33159. ++#define WMI_TLV_DBGLOG_ALL_MODULES 0xffff
  33160. ++#define WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(module_id, log_level) \
  33161. ++ (((module_id << 16) & 0xffff0000) | \
  33162. ++ ((log_level << 0) & 0x000000ff))
  33163. ++
  33164. ++struct wmi_tlv_dbglog_cmd {
  33165. ++ __le32 param;
  33166. ++ __le32 value;
  33167. ++} __packed;
  33168. ++
  33169. ++struct wmi_tlv_resume_cmd {
  33170. ++ __le32 reserved;
  33171. ++} __packed;
  33172. ++
  33173. ++struct wmi_tlv_req_stats_cmd {
  33174. ++ __le32 stats_id; /* wmi_stats_id */
  33175. ++ __le32 vdev_id;
  33176. ++ struct wmi_mac_addr peer_macaddr;
  33177. ++} __packed;
  33178. ++
  33179. ++struct wmi_tlv_vdev_stats {
  33180. ++ __le32 vdev_id;
  33181. ++ __le32 beacon_snr;
  33182. ++ __le32 data_snr;
  33183. ++ __le32 num_tx_frames[4]; /* per-AC */
  33184. ++ __le32 num_rx_frames;
  33185. ++ __le32 num_tx_frames_retries[4];
  33186. ++ __le32 num_tx_frames_failures[4];
  33187. ++ __le32 num_rts_fail;
  33188. ++ __le32 num_rts_success;
  33189. ++ __le32 num_rx_err;
  33190. ++ __le32 num_rx_discard;
  33191. ++ __le32 num_tx_not_acked;
  33192. ++ __le32 tx_rate_history[10];
  33193. ++ __le32 beacon_rssi_history[10];
  33194. ++} __packed;
  33195. ++
  33196. ++struct wmi_tlv_pktlog_enable {
  33197. ++ __le32 reserved;
  33198. ++ __le32 filter;
  33199. ++} __packed;
  33200. ++
  33201. ++struct wmi_tlv_pktlog_disable {
  33202. ++ __le32 reserved;
  33203. ++} __packed;
  33204. ++
  33205. ++enum wmi_tlv_bcn_tx_status {
  33206. ++ WMI_TLV_BCN_TX_STATUS_OK,
  33207. ++ WMI_TLV_BCN_TX_STATUS_XRETRY,
  33208. ++ WMI_TLV_BCN_TX_STATUS_DROP,
  33209. ++ WMI_TLV_BCN_TX_STATUS_FILTERED,
  33210. ++};
  33211. ++
  33212. ++struct wmi_tlv_bcn_tx_status_ev {
  33213. ++ __le32 vdev_id;
  33214. ++ __le32 tx_status;
  33215. ++} __packed;
  33216. ++
  33217. ++struct wmi_tlv_bcn_prb_info {
  33218. ++ __le32 caps;
  33219. ++ __le32 erp;
  33220. ++ u8 ies[0];
  33221. ++} __packed;
  33222. ++
  33223. ++struct wmi_tlv_bcn_tmpl_cmd {
  33224. ++ __le32 vdev_id;
  33225. ++ __le32 tim_ie_offset;
  33226. ++ __le32 buf_len;
  33227. ++} __packed;
  33228. ++
  33229. ++struct wmi_tlv_prb_tmpl_cmd {
  33230. ++ __le32 vdev_id;
  33231. ++ __le32 buf_len;
  33232. ++} __packed;
  33233. ++
  33234. ++struct wmi_tlv_p2p_go_bcn_ie {
  33235. ++ __le32 vdev_id;
  33236. ++ __le32 ie_len;
  33237. ++} __packed;
  33238. ++
  33239. ++enum wmi_tlv_diag_item_type {
  33240. ++ WMI_TLV_DIAG_ITEM_TYPE_FW_EVENT,
  33241. ++ WMI_TLV_DIAG_ITEM_TYPE_FW_LOG,
  33242. ++ WMI_TLV_DIAG_ITEM_TYPE_FW_DEBUG_MSG,
  33243. ++};
  33244. ++
  33245. ++struct wmi_tlv_diag_item {
  33246. ++ u8 type;
  33247. ++ u8 reserved;
  33248. ++ __le16 len;
  33249. ++ __le32 timestamp;
  33250. ++ __le32 code;
  33251. ++ u8 payload[0];
  33252. ++} __packed;
  33253. ++
  33254. ++struct wmi_tlv_diag_data_ev {
  33255. ++ __le32 num_items;
  33256. ++} __packed;
  33257. ++
  33258. ++struct wmi_tlv_sta_keepalive_cmd {
  33259. ++ __le32 vdev_id;
  33260. ++ __le32 enabled;
  33261. ++ __le32 method; /* WMI_STA_KEEPALIVE_METHOD_ */
  33262. ++ __le32 interval; /* in seconds */
  33263. ++} __packed;
  33264. ++
  33265. ++struct wmi_tlv_stats_ev {
  33266. ++ __le32 stats_id; /* WMI_STAT_ */
  33267. ++ __le32 num_pdev_stats;
  33268. ++ __le32 num_vdev_stats;
  33269. ++ __le32 num_peer_stats;
  33270. ++ __le32 num_bcnflt_stats;
  33271. ++ __le32 num_chan_stats;
  33272. ++} __packed;
  33273. ++
  33274. ++void ath10k_wmi_tlv_attach(struct ath10k *ar);
  33275. ++
  33276. ++#endif
  33277. +--- a/backport-include/linux/etherdevice.h
  33278. ++++ b/backport-include/linux/etherdevice.h
  33279. +@@ -148,6 +148,29 @@ static inline bool ether_addr_equal_unal
  33280. + return memcmp(addr1, addr2, ETH_ALEN) == 0;
  33281. + #endif
  33282. + }
  33283. ++
  33284. ++/**
  33285. ++ * ether_addr_copy - Copy an Ethernet address
  33286. ++ * @dst: Pointer to a six-byte array Ethernet address destination
  33287. ++ * @src: Pointer to a six-byte array Ethernet address source
  33288. ++ *
  33289. ++ * Please note: dst & src must both be aligned to u16.
  33290. ++ */
  33291. ++#define ether_addr_copy LINUX_BACKPORT(ether_addr_copy)
  33292. ++static inline void ether_addr_copy(u8 *dst, const u8 *src)
  33293. ++{
  33294. ++#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
  33295. ++ *(u32 *)dst = *(const u32 *)src;
  33296. ++ *(u16 *)(dst + 4) = *(const u16 *)(src + 4);
  33297. ++#else
  33298. ++ u16 *a = (u16 *)dst;
  33299. ++ const u16 *b = (const u16 *)src;
  33300. ++
  33301. ++ a[0] = b[0];
  33302. ++ a[1] = b[1];
  33303. ++ a[2] = b[2];
  33304. ++#endif
  33305. ++}
  33306. + #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) */
  33307. +
  33308. + #endif /* _BACKPORT_LINUX_ETHERDEVICE_H */
  33309. +--- a/drivers/net/wireless/ath/spectral_common.h
  33310. ++++ b/drivers/net/wireless/ath/spectral_common.h
  33311. +@@ -20,6 +20,11 @@
  33312. + #define SPECTRAL_HT20_NUM_BINS 56
  33313. + #define SPECTRAL_HT20_40_NUM_BINS 128
  33314. +
  33315. ++/* TODO: could possibly be 512, but no samples this large
  33316. ++ * could be acquired so far.
  33317. ++ */
  33318. ++#define SPECTRAL_ATH10K_MAX_NUM_BINS 256
  33319. ++
  33320. + /* FFT sample format given to userspace via debugfs.
  33321. + *
  33322. + * Please keep the type/length at the front position and change
  33323. +@@ -31,6 +36,7 @@
  33324. + enum ath_fft_sample_type {
  33325. + ATH_FFT_SAMPLE_HT20 = 1,
  33326. + ATH_FFT_SAMPLE_HT20_40,
  33327. ++ ATH_FFT_SAMPLE_ATH10K,
  33328. + };
  33329. +
  33330. + struct fft_sample_tlv {
  33331. +@@ -85,4 +91,23 @@ struct fft_sample_ht20_40 {
  33332. + u8 data[SPECTRAL_HT20_40_NUM_BINS];
  33333. + } __packed;
  33334. +
  33335. ++struct fft_sample_ath10k {
  33336. ++ struct fft_sample_tlv tlv;
  33337. ++ u8 chan_width_mhz;
  33338. ++ __be16 freq1;
  33339. ++ __be16 freq2;
  33340. ++ __be16 noise;
  33341. ++ __be16 max_magnitude;
  33342. ++ __be16 total_gain_db;
  33343. ++ __be16 base_pwr_db;
  33344. ++ __be64 tsf;
  33345. ++ s8 max_index;
  33346. ++ u8 rssi;
  33347. ++ u8 relpwr_db;
  33348. ++ u8 avgpwr_db;
  33349. ++ u8 max_exp;
  33350. ++
  33351. ++ u8 data[0];
  33352. ++} __packed;
  33353. ++
  33354. + #endif /* SPECTRAL_COMMON_H */
  33355. +--- a/compat/backport-3.13.c
  33356. ++++ b/compat/backport-3.13.c
  33357. +@@ -12,6 +12,10 @@
  33358. + #include <linux/version.h>
  33359. + #include <linux/kernel.h>
  33360. + #include <net/genetlink.h>
  33361. ++#include <linux/delay.h>
  33362. ++#include <linux/pci.h>
  33363. ++#include <linux/device.h>
  33364. ++#include <linux/hwmon.h>
  33365. +
  33366. + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0))
  33367. + #ifdef CPTCFG_REGULATOR
  33368. +@@ -200,3 +204,103 @@ bool __net_get_random_once(void *buf, in
  33369. + }
  33370. + EXPORT_SYMBOL_GPL(__net_get_random_once);
  33371. + #endif /* __BACKPORT_NET_GET_RANDOM_ONCE */
  33372. ++
  33373. ++#ifdef CPTCFG_PCI
  33374. ++#define pci_bus_read_dev_vendor_id LINUX_BACKPORT(pci_bus_read_dev_vendor_id)
  33375. ++static bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
  33376. ++ int crs_timeout)
  33377. ++{
  33378. ++ int delay = 1;
  33379. ++
  33380. ++ if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
  33381. ++ return false;
  33382. ++
  33383. ++ /* some broken boards return 0 or ~0 if a slot is empty: */
  33384. ++ if (*l == 0xffffffff || *l == 0x00000000 ||
  33385. ++ *l == 0x0000ffff || *l == 0xffff0000)
  33386. ++ return false;
  33387. ++
  33388. ++ /*
  33389. ++ * Configuration Request Retry Status. Some root ports return the
  33390. ++ * actual device ID instead of the synthetic ID (0xFFFF) required
  33391. ++ * by the PCIe spec. Ignore the device ID and only check for
  33392. ++ * (vendor id == 1).
  33393. ++ */
  33394. ++ while ((*l & 0xffff) == 0x0001) {
  33395. ++ if (!crs_timeout)
  33396. ++ return false;
  33397. ++
  33398. ++ msleep(delay);
  33399. ++ delay *= 2;
  33400. ++ if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
  33401. ++ return false;
  33402. ++ /* Card hasn't responded in 60 seconds? Must be stuck. */
  33403. ++ if (delay > crs_timeout) {
  33404. ++ printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not responding\n",
  33405. ++ pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
  33406. ++ PCI_FUNC(devfn));
  33407. ++ return false;
  33408. ++ }
  33409. ++ }
  33410. ++
  33411. ++ return true;
  33412. ++}
  33413. ++
  33414. ++bool pci_device_is_present(struct pci_dev *pdev)
  33415. ++{
  33416. ++ u32 v;
  33417. ++
  33418. ++ return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
  33419. ++}
  33420. ++EXPORT_SYMBOL_GPL(pci_device_is_present);
  33421. ++#endif /* CPTCFG_PCI */
  33422. ++
  33423. ++#ifdef CPTCFG_HWMON
  33424. ++struct device*
  33425. ++hwmon_device_register_with_groups(struct device *dev, const char *name,
  33426. ++ void *drvdata,
  33427. ++ const struct attribute_group **groups)
  33428. ++{
  33429. ++ struct device *hwdev;
  33430. ++
  33431. ++ hwdev = hwmon_device_register(dev);
  33432. ++ hwdev->groups = groups;
  33433. ++ dev_set_drvdata(hwdev, drvdata);
  33434. ++ return hwdev;
  33435. ++}
  33436. ++
  33437. ++static void devm_hwmon_release(struct device *dev, void *res)
  33438. ++{
  33439. ++ struct device *hwdev = *(struct device **)res;
  33440. ++
  33441. ++ hwmon_device_unregister(hwdev);
  33442. ++}
  33443. ++
  33444. ++struct device *
  33445. ++devm_hwmon_device_register_with_groups(struct device *dev, const char *name,
  33446. ++ void *drvdata,
  33447. ++ const struct attribute_group **groups)
  33448. ++{
  33449. ++ struct device **ptr, *hwdev;
  33450. ++
  33451. ++ if (!dev)
  33452. ++ return ERR_PTR(-EINVAL);
  33453. ++
  33454. ++ ptr = devres_alloc(devm_hwmon_release, sizeof(*ptr), GFP_KERNEL);
  33455. ++ if (!ptr)
  33456. ++ return ERR_PTR(-ENOMEM);
  33457. ++
  33458. ++ hwdev = hwmon_device_register_with_groups(dev, name, drvdata, groups);
  33459. ++ if (IS_ERR(hwdev))
  33460. ++ goto error;
  33461. ++
  33462. ++ *ptr = hwdev;
  33463. ++ devres_add(dev, ptr);
  33464. ++ return hwdev;
  33465. ++
  33466. ++error:
  33467. ++ devres_free(ptr);
  33468. ++ return hwdev;
  33469. ++}
  33470. ++EXPORT_SYMBOL_GPL(devm_hwmon_device_register_with_groups);
  33471. ++#endif
  33472. +--- /dev/null
  33473. ++++ b/backport-include/linux/hwmon.h
  33474. +@@ -0,0 +1,34 @@
  33475. ++#ifndef __BACKPORT_LINUX_HWMON_H
  33476. ++#define __BACKPORT_LINUX_HWMON_H
  33477. ++#include_next <linux/hwmon.h>
  33478. ++#include <linux/version.h>
  33479. ++
  33480. ++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0)
  33481. ++/*
  33482. ++ * Backports
  33483. ++ *
  33484. ++ * commit bab2243ce1897865e31ea6d59b0478391f51812b
  33485. ++ * Author: Guenter Roeck <linux@roeck-us.net>
  33486. ++ * Date: Sat Jul 6 13:57:23 2013 -0700
  33487. ++ *
  33488. ++ * hwmon: Introduce hwmon_device_register_with_groups
  33489. ++ *
  33490. ++ * hwmon_device_register_with_groups() lets callers register a hwmon device
  33491. ++ * together with all sysfs attributes in a single call.
  33492. ++ *
  33493. ++ * When using hwmon_device_register_with_groups(), hwmon attributes are attached
  33494. ++ * to the hwmon device directly and no longer with its parent device.
  33495. ++ *
  33496. ++ * Signed-off-by: Guenter Roeck <linux@roeck-us.net>
  33497. ++ */
  33498. ++struct device *
  33499. ++hwmon_device_register_with_groups(struct device *dev, const char *name,
  33500. ++ void *drvdata,
  33501. ++ const struct attribute_group **groups);
  33502. ++struct device *
  33503. ++devm_hwmon_device_register_with_groups(struct device *dev, const char *name,
  33504. ++ void *drvdata,
  33505. ++ const struct attribute_group **groups);
  33506. ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) */
  33507. ++
  33508. ++#endif /* __BACKPORT_LINUX_HWMON_H */
  33509. diff --git a/package/kernel/mac80211/patches/920-ath10k_allow_fallback_to_board_bin_on_empty_otp_stream.patch b/package/kernel/mac80211/patches/920-ath10k_allow_fallback_to_board_bin_on_empty_otp_stream.patch
  33510. index 6a5c766..6a3d2a4 100644
  33511. --- a/package/kernel/mac80211/patches/920-ath10k_allow_fallback_to_board_bin_on_empty_otp_stream.patch
  33512. +++ b/package/kernel/mac80211/patches/920-ath10k_allow_fallback_to_board_bin_on_empty_otp_stream.patch
  33513. @@ -1,14 +1,14 @@
  33514. --- a/drivers/net/wireless/ath/ath10k/core.c
  33515. +++ b/drivers/net/wireless/ath/ath10k/core.c
  33516. -@@ -277,7 +277,10 @@ static int ath10k_download_and_run_otp(s
  33517. +@@ -387,7 +387,10 @@ static int ath10k_download_and_run_otp(s
  33518. - ath10k_dbg(ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
  33519. + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
  33520. -- if (result != 0) {
  33521. +- if (!skip_otp && result != 0) {
  33522. + if (result == 2) {
  33523. -+ ath10k_warn("otp stream is empty, using board.bin contents");
  33524. ++ ath10k_warn(ar, "otp stream is empty, using board.bin contents");
  33525. + return 0;
  33526. -+ } else if (result != 0) {
  33527. - ath10k_err("otp calibration failed: %d", result);
  33528. ++ } else if (!skip_otp && result != 0) {
  33529. + ath10k_err(ar, "otp calibration failed: %d", result);
  33530. return -EINVAL;
  33531. }
  33532. diff --git a/package/kernel/mac80211/patches/921-ath10k_init_devices_synchronously.patch b/package/kernel/mac80211/patches/921-ath10k_init_devices_synchronously.patch
  33533. new file mode 100644
  33534. index 0000000..c664faa
  33535. --- /dev/null
  33536. +++ b/package/kernel/mac80211/patches/921-ath10k_init_devices_synchronously.patch
  33537. @@ -0,0 +1,33 @@
  33538. +From: Sven Eckelmann <sven@open-mesh.com>
  33539. +Date: Tue, 18 Nov 2014 12:29:28 +0100
  33540. +Subject: [PATCH] ath10k: Don't initialize devices asynchronously
  33541. +
  33542. +OpenWrt requires all PHYs to be initialized to create the configuration files
  33543. +during bootup. ath10k violates this because it delays the creation of the PHY
  33544. +to a not well defined point in the future.
  33545. +
  33546. +Forcing the work to be done immediately works around this problem but may also
  33547. +delay the boot when firmware images cannot be found.
  33548. +
  33549. +Signed-off-by: Sven Eckelmann <sven@open-mesh.com>
  33550. +---
  33551. +
  33552. +--- a/drivers/net/wireless/ath/ath10k/core.c
  33553. ++++ b/drivers/net/wireless/ath/ath10k/core.c
  33554. +@@ -1321,6 +1321,16 @@ int ath10k_core_register(struct ath10k *
  33555. + ar->chip_id = chip_id;
  33556. + queue_work(ar->workqueue, &ar->register_work);
  33557. +
  33558. ++ /* OpenWrt requires all PHYs to be initialized to create the
  33559. ++ * configuration files during bootup. ath10k violates this
  33560. ++ * because it delays the creation of the PHY to a not well defined
  33561. ++ * point in the future.
  33562. ++ *
  33563. ++ * Forcing the work to be done immediately works around this problem
  33564. ++ * but may also delay the boot when firmware images cannot be found.
  33565. ++ */
  33566. ++ flush_workqueue(ar->workqueue);
  33567. ++
  33568. + return 0;
  33569. + }
  33570. + EXPORT_SYMBOL(ath10k_core_register);
  33571. diff --git a/package/kernel/mac80211/patches/930-ath10k_add_tpt_led_trigger.patch b/package/kernel/mac80211/patches/930-ath10k_add_tpt_led_trigger.patch
  33572. new file mode 100644
  33573. index 0000000..54174b1
  33574. --- /dev/null
  33575. +++ b/package/kernel/mac80211/patches/930-ath10k_add_tpt_led_trigger.patch
  33576. @@ -0,0 +1,37 @@
  33577. +--- a/drivers/net/wireless/ath/ath10k/mac.c
  33578. ++++ b/drivers/net/wireless/ath/ath10k/mac.c
  33579. +@@ -5405,6 +5405,21 @@ struct ath10k_vif *ath10k_get_arvif(stru
  33580. + return arvif_iter.arvif;
  33581. + }
  33582. +
  33583. ++#ifdef CPTCFG_MAC80211_LEDS
  33584. ++static const struct ieee80211_tpt_blink ath10k_tpt_blink[] = {
  33585. ++ { .throughput = 0 * 1024, .blink_time = 334 },
  33586. ++ { .throughput = 1 * 1024, .blink_time = 260 },
  33587. ++ { .throughput = 2 * 1024, .blink_time = 220 },
  33588. ++ { .throughput = 5 * 1024, .blink_time = 190 },
  33589. ++ { .throughput = 10 * 1024, .blink_time = 170 },
  33590. ++ { .throughput = 25 * 1024, .blink_time = 150 },
  33591. ++ { .throughput = 54 * 1024, .blink_time = 130 },
  33592. ++ { .throughput = 120 * 1024, .blink_time = 110 },
  33593. ++ { .throughput = 265 * 1024, .blink_time = 80 },
  33594. ++ { .throughput = 586 * 1024, .blink_time = 50 },
  33595. ++};
  33596. ++#endif
  33597. ++
  33598. + int ath10k_mac_register(struct ath10k *ar)
  33599. + {
  33600. + struct ieee80211_supported_band *band;
  33601. +@@ -5567,6 +5582,12 @@ int ath10k_mac_register(struct ath10k *a
  33602. + goto err_free;
  33603. + }
  33604. +
  33605. ++#if CPTCFG_MAC80211_LEDS
  33606. ++ ieee80211_create_tpt_led_trigger(ar->hw,
  33607. ++ IEEE80211_TPT_LEDTRIG_FL_RADIO, ath10k_tpt_blink,
  33608. ++ ARRAY_SIZE(ath10k_tpt_blink));
  33609. ++#endif
  33610. ++
  33611. + ret = ieee80211_register_hw(ar->hw);
  33612. + if (ret) {
  33613. + ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
  33614. diff --git a/package/kernel/mac80211/patches/950-ath10k_AP_IBSS.patch b/package/kernel/mac80211/patches/950-ath10k_AP_IBSS.patch
  33615. new file mode 100644
  33616. index 0000000..0011b5d
  33617. --- /dev/null
  33618. +++ b/package/kernel/mac80211/patches/950-ath10k_AP_IBSS.patch
  33619. @@ -0,0 +1,32 @@
  33620. +--- a/drivers/net/wireless/ath/ath10k/mac.c
  33621. ++++ b/drivers/net/wireless/ath/ath10k/mac.c
  33622. +@@ -5253,6 +5253,10 @@ static const struct ieee80211_iface_limi
  33623. + .max = 7,
  33624. + .types = BIT(NL80211_IFTYPE_AP)
  33625. + },
  33626. ++ {
  33627. ++ .max = 1,
  33628. ++ .types = BIT(NL80211_IFTYPE_ADHOC)
  33629. ++ },
  33630. + };
  33631. +
  33632. + static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = {
  33633. +@@ -5260,6 +5264,10 @@ static const struct ieee80211_iface_limi
  33634. + .max = 8,
  33635. + .types = BIT(NL80211_IFTYPE_AP)
  33636. + },
  33637. ++ {
  33638. ++ .max = 1,
  33639. ++ .types = BIT(NL80211_IFTYPE_ADHOC)
  33640. ++ },
  33641. + };
  33642. +
  33643. + static const struct ieee80211_iface_combination ath10k_if_comb[] = {
  33644. +@@ -5555,6 +5563,7 @@ int ath10k_mac_register(struct ath10k *a
  33645. + ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
  33646. + ar->hw->wiphy->n_iface_combinations =
  33647. + ARRAY_SIZE(ath10k_10x_if_comb);
  33648. ++ ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
  33649. + break;
  33650. + case ATH10K_FW_WMI_OP_VERSION_UNSET:
  33651. + case ATH10K_FW_WMI_OP_VERSION_MAX: