@@ -199,13 +199,201 @@ void ionic_init_devinfo(struct ionic *ionic)
 	dev_dbg(ionic->dev, "fw_version %s\n", idev->dev_info.fw_version);
 }
 
+static void ionic_map_disc_cmb(struct ionic *ionic)
+{
+	struct ionic_identity *ident = &ionic->ident;
+	u32 length_reg0, length, offset, num_regions;
+	struct ionic_dev_bar *bar = ionic->bars;
+	struct ionic_dev *idev = &ionic->idev;
+	struct device *dev = ionic->dev;
+	int err, sz, i;
+	u64 end;
+
+	mutex_lock(&ionic->dev_cmd_lock);
+
+	ionic_dev_cmd_discover_cmb(idev);
+	err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+	if (!err) {
+		sz = min(sizeof(ident->cmb_layout),
+			 sizeof(idev->dev_cmd_regs->data));
+		memcpy_fromio(&ident->cmb_layout,
+			      &idev->dev_cmd_regs->data, sz);
+	}
+	mutex_unlock(&ionic->dev_cmd_lock);
+
+	if (err) {
+		dev_warn(dev, "Cannot discover CMB layout, disabling CMB\n");
+		return;
+	}
+
+	bar += 2;
+
+	num_regions = le32_to_cpu(ident->cmb_layout.num_regions);
+	if (!num_regions || num_regions > IONIC_MAX_CMB_REGIONS) {
+		dev_warn(dev, "Invalid number of CMB entries (%d)\n",
+			 num_regions);
+		return;
+	}
+
+	dev_dbg(dev, "ionic_cmb_layout_identity num_regions %d flags %x:\n",
+		num_regions, ident->cmb_layout.flags);
+
+	for (i = 0; i < num_regions; i++) {
+		offset = le32_to_cpu(ident->cmb_layout.region[i].offset);
+		length = le32_to_cpu(ident->cmb_layout.region[i].length);
+		end = offset + length;
+
+		dev_dbg(dev, "CMB entry %d: bar_num %u cmb_type %u offset %x length %u\n",
+			i, ident->cmb_layout.region[i].bar_num,
+			ident->cmb_layout.region[i].cmb_type,
+			offset, length);
+
+		if (end > (bar->len >> IONIC_CMB_SHIFT_64K)) {
+			dev_warn(dev, "Out of bounds CMB region %d offset %x length %u\n",
+				 i, offset, length);
+			return;
+		}
+	}
+
+	/* if first entry matches PCI config, expdb is not supported */
+	if (ident->cmb_layout.region[0].bar_num == bar->res_index &&
+	    le32_to_cpu(ident->cmb_layout.region[0].length) == bar->len &&
+	    !ident->cmb_layout.region[0].offset) {
+		dev_warn(dev, "No CMB mapping discovered\n");
+		return;
+	}
+
+	/* process first entry for regular mapping */
+	length_reg0 = le32_to_cpu(ident->cmb_layout.region[0].length);
+	if (!length_reg0) {
+		dev_warn(dev, "region len = 0. No CMB mapping discovered\n");
+		return;
+	}
+
+	/* Verify first entry size matches expected 8MB size (in 64KB pages) */
+	if (length_reg0 != IONIC_BAR2_CMB_ENTRY_SIZE >> IONIC_CMB_SHIFT_64K) {
+		dev_warn(dev, "Unexpected CMB size in entry 0: %u pages\n",
+			 length_reg0);
+		return;
+	}
+
+	sz = BITS_TO_LONGS((length_reg0 << IONIC_CMB_SHIFT_64K) /
+			   PAGE_SIZE) * sizeof(long);
+	idev->cmb_inuse = kzalloc(sz, GFP_KERNEL);
+	if (!idev->cmb_inuse) {
+		dev_warn(dev, "No memory for CMB, disabling\n");
+		idev->phy_cmb_pages = 0;
+		idev->phy_cmb_expdb64_pages = 0;
+		idev->phy_cmb_expdb128_pages = 0;
+		idev->phy_cmb_expdb256_pages = 0;
+		idev->phy_cmb_expdb512_pages = 0;
+		idev->cmb_npages = 0;
+		return;
+	}
+
+	for (i = 0; i < num_regions; i++) {
+		/* check this region matches first region length as to
+		 * ease implementation
+		 */
+		if (le32_to_cpu(ident->cmb_layout.region[i].length) !=
+		    length_reg0)
+			continue;
+
+		offset = le32_to_cpu(ident->cmb_layout.region[i].offset);
+
+		switch (ident->cmb_layout.region[i].cmb_type) {
+		case IONIC_CMB_TYPE_DEVMEM:
+			idev->phy_cmb_pages = bar->bus_addr + offset;
+			idev->cmb_npages =
+				(length_reg0 << IONIC_CMB_SHIFT_64K) / PAGE_SIZE;
+			dev_dbg(dev, "regular cmb mapping: bar->bus_addr %pa region[%d].length %u\n",
+				&bar->bus_addr, i, length);
+			dev_dbg(dev, "idev->phy_cmb_pages %pad, idev->cmb_npages %u\n",
+				&idev->phy_cmb_pages, idev->cmb_npages);
+			break;
+
+		case IONIC_CMB_TYPE_EXPDB64:
+			idev->phy_cmb_expdb64_pages =
+				bar->bus_addr + (offset << IONIC_CMB_SHIFT_64K);
+			dev_dbg(dev, "idev->phy_cmb_expdb64_pages %pad\n",
+				&idev->phy_cmb_expdb64_pages);
+			break;
+
+		case IONIC_CMB_TYPE_EXPDB128:
+			idev->phy_cmb_expdb128_pages =
+				bar->bus_addr + (offset << IONIC_CMB_SHIFT_64K);
+			dev_dbg(dev, "idev->phy_cmb_expdb128_pages %pad\n",
+				&idev->phy_cmb_expdb128_pages);
+			break;
+
+		case IONIC_CMB_TYPE_EXPDB256:
+			idev->phy_cmb_expdb256_pages =
+				bar->bus_addr + (offset << IONIC_CMB_SHIFT_64K);
+			dev_dbg(dev, "idev->phy_cmb_expdb256_pages %pad\n",
+				&idev->phy_cmb_expdb256_pages);
+			break;
+
+		case IONIC_CMB_TYPE_EXPDB512:
+			idev->phy_cmb_expdb512_pages =
+				bar->bus_addr + (offset << IONIC_CMB_SHIFT_64K);
+			dev_dbg(dev, "idev->phy_cmb_expdb512_pages %pad\n",
+				&idev->phy_cmb_expdb512_pages);
+			break;
+
+		default:
+			dev_warn(dev, "[%d] Invalid cmb_type (%d)\n",
+				 i, ident->cmb_layout.region[i].cmb_type);
+			break;
+		}
+	}
+}
+
+static void ionic_map_classic_cmb(struct ionic *ionic)
+{
+	struct ionic_dev_bar *bar = ionic->bars;
+	struct ionic_dev *idev = &ionic->idev;
+	struct device *dev = ionic->dev;
+	int sz;
+
+	bar += 2;
+	/* classic CMB mapping */
+	idev->phy_cmb_pages = bar->bus_addr;
+	idev->cmb_npages = bar->len / PAGE_SIZE;
+	dev_dbg(dev, "classic cmb mapping: bar->bus_addr %pa bar->len %lu\n",
+		&bar->bus_addr, bar->len);
+	dev_dbg(dev, "idev->phy_cmb_pages %pad, idev->cmb_npages %u\n",
+		&idev->phy_cmb_pages, idev->cmb_npages);
+
+	sz = BITS_TO_LONGS(idev->cmb_npages) * sizeof(long);
+	idev->cmb_inuse = kzalloc(sz, GFP_KERNEL);
+	if (!idev->cmb_inuse) {
+		idev->phy_cmb_pages = 0;
+		idev->cmb_npages = 0;
+	}
+}
+
+void ionic_map_cmb(struct ionic *ionic)
+{
+	struct pci_dev *pdev = ionic->pdev;
+	struct device *dev = ionic->dev;
+
+	if (!(pci_resource_flags(pdev, 4) & IORESOURCE_MEM)) {
+		dev_dbg(dev, "No CMB, disabling\n");
+		return;
+	}
+
+	if (ionic->ident.dev.capabilities & cpu_to_le64(IONIC_DEV_CAP_DISC_CMB))
+		ionic_map_disc_cmb(ionic);
+	else
+		ionic_map_classic_cmb(ionic);
+}
+
 int ionic_dev_setup(struct ionic *ionic)
 {
 	struct ionic_dev_bar *bar = ionic->bars;
 	unsigned int num_bars = ionic->num_bars;
 	struct ionic_dev *idev = &ionic->idev;
 	struct device *dev = ionic->dev;
-	int size;
 	u32 sig;
 	int err;
 
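A note on the arithmetic in ionic_map_disc_cmb() above: the discovered layout reports region offsets and lengths in 64 KiB units (hence the IONIC_CMB_SHIFT_64K shifts), while the allocation bitmap is sized in CPU pages. The standalone sketch below is not part of the patch; it only walks through that conversion for the 8 MB entry 0 that the code expects. The shift value of 16 and the 4 KiB page size are assumptions for illustration.

/* Standalone sketch (not from the patch): converts a CMB entry length given
 * in 64 KiB units into bytes and then into CPU pages, mirroring the
 * (length_reg0 << IONIC_CMB_SHIFT_64K) / PAGE_SIZE calculation above.
 */
#include <stdio.h>

#define EX_CMB_SHIFT_64K	16			/* assumed: 64 KiB units */
#define EX_CMB_ENTRY_SIZE	(8UL * 1024 * 1024)	/* assumed: 8 MiB entry 0 */
#define EX_PAGE_SIZE		4096UL			/* assumed: 4 KiB CPU pages */

int main(void)
{
	unsigned long length_reg0 = EX_CMB_ENTRY_SIZE >> EX_CMB_SHIFT_64K; /* 128 units */
	unsigned long npages = (length_reg0 << EX_CMB_SHIFT_64K) / EX_PAGE_SIZE;

	/* Prints "entry0: 128 x 64KiB units -> 2048 CPU pages" for the
	 * values assumed above.
	 */
	printf("entry0: %lu x 64KiB units -> %lu CPU pages\n", length_reg0, npages);
	return 0;
}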
@@ -255,16 +443,11 @@ int ionic_dev_setup(struct ionic *ionic)
 	mutex_init(&idev->cmb_inuse_lock);
 	if (num_bars < 3 || !ionic->bars[IONIC_PCI_BAR_CMB].len) {
 		idev->cmb_inuse = NULL;
+		idev->phy_cmb_pages = 0;
+		idev->cmb_npages = 0;
 		return 0;
 	}
 
-	idev->phy_cmb_pages = bar->bus_addr;
-	idev->cmb_npages = bar->len / PAGE_SIZE;
-	size = BITS_TO_LONGS(idev->cmb_npages) * sizeof(long);
-	idev->cmb_inuse = kzalloc(size, GFP_KERNEL);
-	if (!idev->cmb_inuse)
-		dev_warn(dev, "No memory for CMB, disabling\n");
-
 	return 0;
 }
 
@@ -277,6 +460,11 @@ void ionic_dev_teardown(struct ionic *ionic)
 	idev->phy_cmb_pages = 0;
 	idev->cmb_npages = 0;
 
+	idev->phy_cmb_expdb64_pages = 0;
+	idev->phy_cmb_expdb128_pages = 0;
+	idev->phy_cmb_expdb256_pages = 0;
+	idev->phy_cmb_expdb512_pages = 0;
+
 	if (ionic->wq) {
 		destroy_workqueue(ionic->wq);
 		ionic->wq = NULL;
@@ -698,28 +886,79 @@ void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq,
 	ionic_dev_cmd_go(idev, &cmd);
 }
 
+void ionic_dev_cmd_discover_cmb(struct ionic_dev *idev)
+{
+	union ionic_dev_cmd cmd = {
+		.discover_cmb.opcode = IONIC_CMD_DISCOVER_CMB,
+	};
+
+	ionic_dev_cmd_go(idev, &cmd);
+}
+
 int ionic_db_page_num(struct ionic_lif *lif, int pid)
 {
 	return (lif->hw_index * lif->dbid_count) + pid;
 }
 
-int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr, int order)
+int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr,
+		  int order, u8 stride_log2, bool *expdb)
 {
 	struct ionic_dev *idev = &lif->ionic->idev;
-	int ret;
+	void __iomem *nonexpdb_pgptr;
+	phys_addr_t nonexpdb_pgaddr;
+	int i, idx;
 
 	mutex_lock(&idev->cmb_inuse_lock);
-	ret = bitmap_find_free_region(idev->cmb_inuse, idev->cmb_npages, order);
+	idx = bitmap_find_free_region(idev->cmb_inuse, idev->cmb_npages, order);
 	mutex_unlock(&idev->cmb_inuse_lock);
 
-	if (ret < 0)
-		return ret;
+	if (idx < 0)
+		return idx;
+
+	*pgid = (u32)idx;
+
+	if (idev->phy_cmb_expdb64_pages &&
+	    stride_log2 == IONIC_EXPDB_64B_WQE_LG2) {
+		*pgaddr = idev->phy_cmb_expdb64_pages + idx * PAGE_SIZE;
+		if (expdb)
+			*expdb = true;
+	} else if (idev->phy_cmb_expdb128_pages &&
+		   stride_log2 == IONIC_EXPDB_128B_WQE_LG2) {
+		*pgaddr = idev->phy_cmb_expdb128_pages + idx * PAGE_SIZE;
+		if (expdb)
+			*expdb = true;
+	} else if (idev->phy_cmb_expdb256_pages &&
+		   stride_log2 == IONIC_EXPDB_256B_WQE_LG2) {
+		*pgaddr = idev->phy_cmb_expdb256_pages + idx * PAGE_SIZE;
+		if (expdb)
+			*expdb = true;
+	} else if (idev->phy_cmb_expdb512_pages &&
+		   stride_log2 == IONIC_EXPDB_512B_WQE_LG2) {
+		*pgaddr = idev->phy_cmb_expdb512_pages + idx * PAGE_SIZE;
+		if (expdb)
+			*expdb = true;
+	} else {
+		*pgaddr = idev->phy_cmb_pages + idx * PAGE_SIZE;
+		if (expdb)
+			*expdb = false;
+	}
 
-	*pgid = ret;
-	*pgaddr = idev->phy_cmb_pages + ret * PAGE_SIZE;
+	/* clear the requested CMB region, 1 PAGE_SIZE ioremap at a time */
+	nonexpdb_pgaddr = idev->phy_cmb_pages + idx * PAGE_SIZE;
+	for (i = 0; i < (1 << order); i++) {
+		nonexpdb_pgptr =
+			ioremap_wc(nonexpdb_pgaddr + i * PAGE_SIZE, PAGE_SIZE);
+		if (!nonexpdb_pgptr) {
+			ionic_put_cmb(lif, *pgid, order);
+			return -ENOMEM;
+		}
+		memset_io(nonexpdb_pgptr, 0, PAGE_SIZE);
+		iounmap(nonexpdb_pgptr);
+	}
 
 	return 0;
 }
+EXPORT_SYMBOL_NS(ionic_get_cmb, "NET_IONIC");
 
 void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order)
 {
@@ -729,6 +968,7 @@ void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order)
 	bitmap_release_region(idev->cmb_inuse, pgid, order);
 	mutex_unlock(&idev->cmb_inuse_lock);
 }
+EXPORT_SYMBOL_NS(ionic_put_cmb, "NET_IONIC");
 
 int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
 		  struct ionic_intr_info *intr,
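For context on the widened allocator interface, here is a hypothetical caller-side sketch, not taken from this patch: it requests an order-0 CMB page for a queue with 64 B work-queue entries and checks whether an express-doorbell (expdb) mapping was handed back. The function name example_alloc_cmb_queue, its placement, and its error handling are illustrative assumptions; only ionic_get_cmb(), ionic_put_cmb(), and IONIC_EXPDB_64B_WQE_LG2 come from the diff.

/* Hypothetical caller sketch (kernel context, not part of this patch). */
static int example_alloc_cmb_queue(struct ionic_lif *lif)
{
	phys_addr_t pgaddr;
	bool expdb = false;
	u32 pgid;
	int err;

	/* order 0 = one CMB page; stride_log2 selects the expdb region
	 * matching the queue's WQE size, if one was discovered
	 */
	err = ionic_get_cmb(lif, &pgid, &pgaddr, 0,
			    IONIC_EXPDB_64B_WQE_LG2, &expdb);
	if (err)
		return err;

	/* ... ioremap pgaddr and build the queue; expdb says whether the
	 * returned address is an express-doorbell mapping ...
	 */

	ionic_put_cmb(lif, pgid, 0);
	return 0;
}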