From 8562e0f2fb4afa317fd5c3d07bdc12feb08820bb Mon Sep 17 00:00:00 2001
From: Nidhi Shivashankara Belur
Date: Wed, 26 Jun 2024 16:46:09 -0700
Subject: [PATCH] Document Updates for fec operator v2.9.0
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This commit includes the following updates to the documentation.

- Configuration of Intel® vRAN Boost 2.0 (VRB2) using a new API
  `SriovVrbClusterConfig`.

- Re-arrange config specs.

- Replace ACC200 with VRB1 for documentation only, not in the config specs.

- Replace the status output of SriovFecNodeConfig with the latest results
  to include the pf_bb_config version used by the FEC Operator.

Story: 2011121
Task: 50460

Change-Id: I9cd93087636b3967c804f6bb6c4b24c3850f5aeb
Signed-off-by: Nidhi Shivashankara Belur
---
 ...or-hosted-vran-containarized-workloads.rst | 951 +++++++++++-------
 .../set-up-pods-to-use-sriov.rst              |  49 +-
 ...flexran-2203-on-starlingx-1d1b15ecb16f.rst |   2 +-
 doc/source/shared/abbrevs.txt                 |   3 +-
 4 files changed, 612 insertions(+), 393 deletions(-)

diff --git a/doc/source/node_management/kubernetes/hardware_acceleration_devices/configure-sriov-fec-operator-to-enable-hw-accelerators-for-hosted-vran-containarized-workloads.rst b/doc/source/node_management/kubernetes/hardware_acceleration_devices/configure-sriov-fec-operator-to-enable-hw-accelerators-for-hosted-vran-containarized-workloads.rst
index e35401ada..34c097695 100644
--- a/doc/source/node_management/kubernetes/hardware_acceleration_devices/configure-sriov-fec-operator-to-enable-hw-accelerators-for-hosted-vran-containarized-workloads.rst
+++ b/doc/source/node_management/kubernetes/hardware_acceleration_devices/configure-sriov-fec-operator-to-enable-hw-accelerators-for-hosted-vran-containarized-workloads.rst
@@ -15,9 +15,11 @@ following |vRAN| |FEC| accelerators:
 
 - Intel® vRAN Dedicated Accelerator ACC100.
 
-- Intel® |FPGA| Programmable Acceleration Card N3000.
+- Intel® vRAN Boost Accelerator 1.0 VRB1 (formerly ACC200).
 
-- Intel® vRAN Dedicated Accelerator ACC200.
+- Intel® vRAN Boost Accelerator 2.0 VRB2.
+
+- Intel® |FPGA| Programmable Acceleration Card N3000.
 
 .. rubric:: |prereq|
 
@@ -51,7 +53,7 @@
 
        | updated_at    | None                                |
        +---------------+-------------------------------------+
 
-#. Configure a different resource name for |FEC| devices as desired.
+#. (Optional) Configure a different resource name for |FEC| devices.
 
    - To change the resource name for ACC100, use the following command:
 
@@ -108,11 +110,21 @@
 
 #. List all the nodes in the cluster with |FEC| accelerators installed.
 
-   .. code-block:: none
+   - ACC100 and N3000
 
-      $ kubectl get sriovfecnodeconfigs.sriovfec.intel.com -n sriov-fec-system
-      NAME           CONFIGURED
-      controller-0   NotRequested
+     .. code-block:: none
+
+        $ kubectl get sriovfecnodeconfigs.sriovfec.intel.com -n sriov-fec-system
+        NAME           CONFIGURED
+        controller-0   NotRequested
+
+   - VRB1 and VRB2
+
+     .. code-block:: none
+
+        $ kubectl get sriovvrbnodeconfigs.sriovvrb.intel.com -n sriov-fec-system
+        NAME           CONFIGURED
+        controller-0   NotRequested
 
 #. Find the |PCI| address of the |PF| of the |SRIOV| |FEC| accelerator
    device to be configured.
 
@@ -147,10 +159,78 @@
 
       - deviceID: 0d5c
        driver: ""
        maxVirtualFunctions: 16
-       pciAddress: "0000:17:00.0"
+       pciAddress: "0000:8a:00.0"
        vendorID: "8086"
        virtualFunctions: []
 
+   - VRB1
+
+     .. 
code-block:: none + + $ kubectl get sriovvrbnodeconfigs.sriovvrb.intel.com -n sriov-fec-system controller-0 -o yaml + apiVersion: sriovvrb.intel.com/v1 + kind: SriovVrbNodeConfig + metadata: + creationTimestamp: "2024-05-17T01:35:36Z" + generation: 1 + name: controller-0 + namespace: sriov-fec-system + resourceVersion: "1420543" + uid: 4db81a14-2ddf-4fc3-9f09-939ece5fd33a + spec: + physicalFunctions: [] + status: + conditions: + - lastTransitionTime: "2024-05-17T01:35:36Z" + message: "" + observedGeneration: 1 + reason: NotRequested + status: "False" + type: Configured + inventory: + sriovAccelerators: + - deviceID: 57c0 + driver: vfio-pci + maxVirtualFunctions: 16 + pciAddress: 0000:f7:00.0 + vendorID: "8086" + virtualFunctions: [] + pfBbConfVersion: v24.03-0-g1bbb3ac + + - VRB2 + + .. code-block:: none + + $ kubectl get sriovvrbnodeconfigs.sriovvrb.intel.com -n sriov-fec-system controller-0 -o yaml + apiVersion: sriovvrb.intel.com/v1 + kind: SriovVrbNodeConfig + metadata: + creationTimestamp: "2024-06-26T20:32:51Z" + generation: 1 + name: controller-0 + namespace: sriov-fec-system + resourceVersion: "9384433" + uid: 31a7325e-d943-400b-aa14-2449d2d019c3 + spec: + physicalFunctions: [] + status: + conditions: + - lastTransitionTime: "2024-06-26T20:32:52Z" + message: "" + observedGeneration: 1 + reason: NotRequested + status: "False" + type: Configured + inventory: + sriovAccelerators: + - deviceID: 57c2 + driver: vfio-pci + maxVirtualFunctions: 64 + pciAddress: "0000:07:00.0" + vendorID: "8086" + virtualFunctions: [] + pfBbConfVersion: v24.03-0-g1bbb3ac + - N3000 .. code-block:: none @@ -185,40 +265,6 @@ following |vRAN| |FEC| accelerators: vendorID: "8086" virtualFunctions: [] - - ACC200 - - .. code-block:: none - - $ kubectl get sriovfecnodeconfigs.sriovfec.intel.com -n sriov-fec-system controller-0 -o yaml - apiVersion: sriovfec.intel.com/v2 - kind: SriovFecNodeConfig - metadata: - creationTimestamp: "2022-10-21T18:31:41Z" - generation: 1 - name: controller-0 - namespace: sriov-fec-system - resourceVersion: "2144487" - selfLink: /apis/sriovfec.intel.com/v2/namespaces/sriov-fec-system/sriovfecnodeconfigs/controller-0 - uid: e4e536fc-a777-4e26-974d-71226d43c4ed - spec: - physicalFunctions: [] - status: - conditions: - - lastTransitionTime: "2022-10-21T18:31:41Z" - message: "" - observedGeneration: 1 - reason: NotRequested - status: "False" - type: Configured - inventory: - sriovAccelerators: - - deviceID: 57c0 - driver: "" - maxVirtualFunctions: 16 - pciAddress: 0000:f7:00.0 - vendorID: "8086" - virtualFunctions: [] - #. Apply the |FEC| device configuration. #. ACC100 device configuration. @@ -249,7 +295,7 @@ following |vRAN| |FEC| accelerators: nodeSelector: kubernetes.io/hostname: controller-0 acceleratorSelector: - pciAddress: 0000:17:00.0 + pciAddress: 0000:8a:00.0 physicalFunction: pfDriver: "vfio-pci" vfDriver: "vfio-pci" @@ -294,7 +340,7 @@ following |vRAN| |FEC| accelerators: nodeSelector: kubernetes.io/hostname: controller-0 acceleratorSelector: - pciAddress: 0000:17:00.0 + pciAddress: 0000:8a:00.0 physicalFunction: pfDriver: "vfio-pci" vfDriver: "vfio-pci" @@ -323,141 +369,134 @@ following |vRAN| |FEC| accelerators: aqDepthLog2: 4 drainSkip: true - #. N3000 device configuration. + #. VRB1 device configuration. - - The maximum number of |VFs| that can be configured for N3000 is 8 - |VFs|. - - - The maximum number of queues that can be mapped to each VF for uplink - or downlink is 32. 
- - - The following configuration for N3000 creates 1 |VF| with 32 - queues each for 5G uplink and 5G downlink. - - .. code-block:: none - - apiVersion: sriovfec.intel.com/v2 - kind: SriovFecClusterConfig - metadata: - name: config - namespace: sriov-fec-system - spec: - priority: 1 - nodeSelector: - kubernetes.io/hostname: controller-0 - acceleratorSelector: - pciAddress: 0000:1c:00.0 - physicalFunction: - pfDriver: pci-pf-stub - vfDriver: vfio-pci - vfAmount: 1 - bbDevConfig: - n3000: - # Network Type: either "FPGA_5GNR" or "FPGA_LTE" - networkType: "FPGA_5GNR" - # Pf mode: false = VF Programming, true = PF Programming - pfMode: false - flrTimeout: 610 - downlink: - bandwidth: 3 - loadBalance: 128 - queues: - vf0: 32 - vf1: 0 - vf2: 0 - vf3: 0 - vf4: 0 - vf5: 0 - vf6: 0 - vf7: 0 - uplink: - bandwidth: 3 - loadBalance: 128 - queues: - vf0: 32 - vf1: 0 - vf2: 0 - vf3: 0 - vf4: 0 - vf5: 0 - vf6: 0 - vf7: 0 - drainSkip: true - - - The following configuration for N3000 creates 2 |VFs| with 16 - queues each, mapping 32 queues with 2 |VFs| for 5G uplink and - another 32 queues with 2 |VFs| for 5G downlink. - - .. code-block:: none - - apiVersion: sriovfec.intel.com/v2 - kind: SriovFecClusterConfig - metadata: - name: config - namespace: sriov-fec-system - spec: - priority: 1 - nodeSelector: - kubernetes.io/hostname: controller-0 - acceleratorSelector: - pciAddress: 0000:1c:00.0 - physicalFunction: - pfDriver: pci-pf-stub - vfDriver: vfio-pci - vfAmount: 2 - bbDevConfig: - n3000: - # Network Type: either "FPGA_5GNR" or "FPGA_LTE" - networkType: "FPGA_5GNR" - # Pf mode: false = VF Programming, true = PF Programming - pfMode: false - flrTimeout: 610 - downlink: - bandwidth: 3 - loadBalance: 128 - queues: - vf0: 16 - vf1: 16 - vf2: 0 - vf3: 0 - vf4: 0 - vf5: 0 - vf6: 0 - vf7: 0 - uplink: - bandwidth: 3 - loadBalance: 128 - queues: - vf0: 16 - vf1: 16 - vf2: 0 - vf3: 0 - vf4: 0 - vf5: 0 - vf6: 0 - vf7: 0 - drainSkip: true - - #. ACC200 device configuration. - - - The maximum number of |VFs| that can be configured for ACC200 + - The maximum number of |VFs| that can be configured for VRB1 is 16 |VFs|. - There are 16 queue groups available which can be allocated to any - available operation (4GUL/4GDL/5GUL/5GDL) based on the + available operation (4GUL/4GDL/5GUL/5GDL/FFT) based on the ``numQueueGroups`` parameter. - The product of ``numQueueGroups`` x ``numAqsPerGroups`` x ``aqDepthLog2`` x ``numVfBundles`` must be less than 64K. - - The following configuration creates 1 |VF|, configures ACC200's 12 + - The following configuration creates 1 |VF|, configures VRB1's 12 queue groups; allocating 16 queues per |VF| for 5G processing engine functions(5GUL/5GDL/FFT). .. 
code-block:: none
+
+        apiVersion: sriovvrb.intel.com/v1
+        kind: SriovVrbClusterConfig
+        metadata:
+          name: config
+          namespace: sriov-fec-system
+        spec:
+          acceleratorSelector:
+            pciAddress: 0000:f7:00.0
+          nodeSelector:
+            kubernetes.io/hostname: controller-0
+          priority: 1
+          drainSkip: true
+          physicalFunction:
+            pfDriver: vfio-pci
+            vfDriver: vfio-pci
+            vfAmount: 1
+            bbDevConfig:
+              vrb1:
+                numVfBundles: 1
+                pfMode: false
+                maxQueueSize: 1024
+                downlink4G:
+                  aqDepthLog2: 4
+                  numAqsPerGroups: 16
+                  numQueueGroups: 0
+                uplink4G:
+                  aqDepthLog2: 4
+                  numAqsPerGroups: 16
+                  numQueueGroups: 0
+                downlink5G:
+                  aqDepthLog2: 4
+                  numAqsPerGroups: 16
+                  numQueueGroups: 4
+                uplink5G:
+                  aqDepthLog2: 4
+                  numAqsPerGroups: 16
+                  numQueueGroups: 4
+                qfft:
+                  aqDepthLog2: 4
+                  numAqsPerGroups: 16
+                  numQueueGroups: 4
+
+      - The following configuration creates 2 |VFs| and configures VRB1's
+        16 queue groups, allocating 16 queues per |VF| for the 4G and 5G
+        processing engine functions (4GUL/4GDL/5GUL/5GDL/FFT).
+
+      .. code-block:: none
+
+        apiVersion: sriovvrb.intel.com/v1
+        kind: SriovVrbClusterConfig
+        metadata:
+          name: config
+          namespace: sriov-fec-system
+        spec:
+          acceleratorSelector:
+            pciAddress: 0000:f7:00.0
+          nodeSelector:
+            kubernetes.io/hostname: controller-0
+          priority: 1
+          drainSkip: true
+          physicalFunction:
+            pfDriver: vfio-pci
+            vfDriver: vfio-pci
+            vfAmount: 2
+            bbDevConfig:
+              vrb1:
+                numVfBundles: 2
+                pfMode: false
+                maxQueueSize: 1024
+                downlink4G:
+                  aqDepthLog2: 4
+                  numAqsPerGroups: 16
+                  numQueueGroups: 4
+                uplink4G:
+                  aqDepthLog2: 4
+                  numAqsPerGroups: 16
+                  numQueueGroups: 4
+                downlink5G:
+                  aqDepthLog2: 4
+                  numAqsPerGroups: 16
+                  numQueueGroups: 4
+                uplink5G:
+                  aqDepthLog2: 4
+                  numAqsPerGroups: 16
+                  numQueueGroups: 4
+                qfft:
+                  aqDepthLog2: 4
+                  numAqsPerGroups: 16
+                  numQueueGroups: 4
+
+   #. VRB2 device configuration.
+
+      - The maximum number of |VFs| that can be configured for VRB2
+        is 64 |VFs|.
+
+      - There are 32 queue groups available that can be allocated to any
+        available operation (4GUL/4GDL/5GUL/5GDL/FFT/MLD) based on the
+        ``numQueueGroups`` parameter.
+
+      - The product of ``numQueueGroups`` x ``numAqsPerGroups`` x
+        ``aqDepthLog2`` x ``numVfBundles`` must be less than 256K.
+
+      - The following configuration creates 1 |VF| and configures VRB2's 32
+        queue groups, allocating 64 queues per |VF| for the 5G processing
+        engine functions (5GUL/5GDL/FFT/MLD).
+
+      .. 
code-block:: none + + apiVersion: sriovvrb.intel.com/v1 + kind: SriovVrbClusterConfig metadata: name: config namespace: sriov-fec-system @@ -466,42 +505,53 @@ following |vRAN| |FEC| accelerators: nodeSelector: kubernetes.io/hostname: controller-0 acceleratorSelector: - pciAddress: 0000:f7:00.0 + pciAddress: 0000:07:00.0 physicalFunction: pfDriver: vfio-pci vfDriver: vfio-pci vfAmount: 1 bbDevConfig: - acc200: + vrb2: # Pf mode: false = VF Programming, true = PF Programming pfMode: false numVfBundles: 1 maxQueueSize: 1024 uplink4G: numQueueGroups: 0 - numAqsPerGroups: 16 - aqDepthLog2: 4 + numAqsPerGroups: 64 + aqDepthLog2: 5 downlink4G: numQueueGroups: 0 - numAqsPerGroups: 16 - aqDepthLog2: 4 + numAqsPerGroups: 64 + aqDepthLog2: 5 uplink5G: - numQueueGroups: 4 - numAqsPerGroups: 16 - aqDepthLog2: 4 + numQueueGroups: 8 + numAqsPerGroups: 64 + aqDepthLog2: 5 downlink5G: - numQueueGroups: 4 - numAqsPerGroups: 16 - aqDepthLog2: 4 + numQueueGroups: 8 + numAqsPerGroups: 64 + aqDepthLog2: 5 qfft: - numQueueGroups: 4 - numAqsPerGroups: 16 - aqDepthLog2: 4 + numQueueGroups: 8 + numAqsPerGroups: 64 + aqDepthLog2: 5 + qmld: + numQueueGroups: 8 + numAqsPerGroups: 64 + aqDepthLog2: 5 drainSkip: true - - The following configuration creates 2 |VF|, configures ACC200's 16 - queue groups; allocating 16 queues per |VF| for 4G and 5G - processing engine functions(4GUL/4GDL/5GUL/5GDL/FFT). + #. N3000 device configuration. + + - The maximum number of |VFs| that can be configured for N3000 is 8 + |VFs|. + + - The maximum number of queues that can be mapped to each VF for + uplink or downlink is 32. + + - The following configuration for N3000 creates 1 |VF| with 32 + queues each for 5G uplink and 5G downlink. .. code-block:: none @@ -515,67 +565,135 @@ following |vRAN| |FEC| accelerators: nodeSelector: kubernetes.io/hostname: controller-0 acceleratorSelector: - pciAddress: 0000:f7:00.0 + pciAddress: 0000:1c:00.0 + physicalFunction: + pfDriver: pci-pf-stub + vfDriver: vfio-pci + vfAmount: 1 + bbDevConfig: + n3000: + # Network Type: either "FPGA_5GNR" or "FPGA_LTE" + networkType: "FPGA_5GNR" + # Pf mode: false = VF Programming, true = PF Programming + pfMode: false + flrTimeout: 610 + downlink: + bandwidth: 3 + loadBalance: 128 + queues: + vf0: 32 + vf1: 0 + vf2: 0 + vf3: 0 + vf4: 0 + vf5: 0 + vf6: 0 + vf7: 0 + uplink: + bandwidth: 3 + loadBalance: 128 + queues: + vf0: 32 + vf1: 0 + vf2: 0 + vf3: 0 + vf4: 0 + vf5: 0 + vf6: 0 + vf7: 0 + drainSkip: true + + - The following configuration for N3000 creates 2 |VFs| with 16 + queues each, mapping 32 queues with 2 |VFs| for 5G uplink and + another 32 queues with 2 |VFs| for 5G downlink. + + .. 
code-block:: none
 
       apiVersion: sriovfec.intel.com/v2
       kind: SriovFecClusterConfig
       metadata:
         name: config
         namespace: sriov-fec-system
       spec:
         priority: 1
         nodeSelector:
           kubernetes.io/hostname: controller-0
         acceleratorSelector:
-          pciAddress: 0000:f7:00.0
+          pciAddress: 0000:1c:00.0
        physicalFunction:
          pfDriver: vfio-pci
          vfDriver: vfio-pci
          vfAmount: 2
          bbDevConfig:
-           acc200:
+           n3000:
+             # Network Type: either "FPGA_5GNR" or "FPGA_LTE"
+             networkType: "FPGA_5GNR"
             # Pf mode: false = VF Programming, true = PF Programming
             pfMode: false
-           numVfBundles: 2
-           maxQueueSize: 1024
-           uplink4G:
-             numQueueGroups: 2
-             numAqsPerGroups: 16
-             aqDepthLog2: 4
-           downlink4G:
-             numQueueGroups: 2
-             numAqsPerGroups: 16
-             aqDepthLog2: 4
-           uplink5G:
-             numQueueGroups: 4
-             numAqsPerGroups: 16
-             aqDepthLog2: 4
-           downlink5G:
-             numQueueGroups: 4
-             numAqsPerGroups: 16
-             aqDepthLog2: 4
-           qfft:
-             numQueueGroups: 4
-             numAqsPerGroups: 16
-             aqDepthLog2: 4
+             flrTimeout: 610
+             downlink:
+               bandwidth: 3
+               loadBalance: 128
+               queues:
+                 vf0: 16
+                 vf1: 16
+                 vf2: 0
+                 vf3: 0
+                 vf4: 0
+                 vf5: 0
+                 vf6: 0
+                 vf7: 0
+             uplink:
+               bandwidth: 3
+               loadBalance: 128
+               queues:
+                 vf0: 16
+                 vf1: 16
+                 vf2: 0
+                 vf3: 0
+                 vf4: 0
+                 vf5: 0
+                 vf6: 0
+                 vf7: 0
          drainSkip: true
 
+   #. The ``SriovFecClusterConfig`` or ``SriovVrbClusterConfig`` sets
+      ``spec.drainSkip: True`` by default to avoid node draining.
 
-
-
-   #. The ``SriovFecClusterConfig`` must be provided with
-      ``spec.drainSkip: True`` to avoid node draining.
-
-   #. Create and apply a ``SriovFecClusterConfig`` custom resource using
-      the above examples as templates, setting the parameters
+   #. Create and apply a ``SriovFecClusterConfig`` or
+      ``SriovVrbClusterConfig`` custom resource using the above examples as
+      templates, setting the parameters
       ``nodeSelector:kubernetes.io/hostname`` and
       ``acceleratorSelector:pciAddress`` to select the desired device and
       configuring ``vfAmount`` and ``numVfBundles`` as desired.
 
-      .. code-block:: none
+      - For ACC100 and N3000
 
-         $ kubectl apply -f .yaml
-         sriovfecclusterconfig.sriovfec.intel.com/config created
+        .. code-block:: none
+
+           $ kubectl apply -f .yaml
+           sriovfecclusterconfig.sriovfec.intel.com/config created
+
+      - For VRB1 and VRB2
+
+        .. code-block:: none
+
+           $ kubectl apply -f .yaml
+           sriovvrbclusterconfig.sriovvrb.intel.com/config created
 
       .. note::
 
          The ``vfAmount`` and ``numVfBundles`` in ``SriovFecClusterConfig``
-         must be always equal for ACC100/ACC200.
+         or ``SriovVrbClusterConfig`` must always be equal for ACC100,
+         VRB1, and VRB2.
 
-#. Verify that the |FEC| configuration is applied.
+#. Verify that the |FEC| or |VRB| configuration is applied.
 
    .. note::
 
-      When using |FEC| operator, there is no integration between |FEC| operator
-      and system inventory, so the configuration applied by |FEC| operator may
-      not reflect in system inventory.
+      When using the |FEC| operator, there is no integration between the
+      |FEC| operator and the system inventory, so the configuration applied
+      by the |FEC| operator may not be reflected in the system inventory.
 
   - An example of ACC100 status after applying 1 |VF| configuration. 
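 
+     Before reading the full YAML status below, a quick check is to confirm
+     that the ``CONFIGURED`` column of the node config now reports
+     ``Succeeded`` (shown for the FEC API; the VRB API can be queried the
+     same way with ``sriovvrbnodeconfigs.sriovvrb.intel.com``). Expected
+     output, assuming the configuration succeeded:
+
+     .. code-block:: none
+
+        $ kubectl get sriovfecnodeconfigs.sriovfec.intel.com -n sriov-fec-system
+        NAME           CONFIGURED
+        controller-0   Succeeded
 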
@@ -585,14 +703,14 @@ following |vRAN| |FEC| accelerators: apiVersion: sriovfec.intel.com/v2 kind: SriovFecNodeConfig metadata: - creationTimestamp: "2022-09-29T19:49:59Z" + creationTimestamp: "2024-06-24T18:15:02Z" generation: 2 name: controller-0 namespace: sriov-fec-system - resourceVersion: "2935834" - selfLink: /apis/sriovfec.intel.com/v2/namespaces/sriov-fec-system/sriovfecnodeconfigs/controller-0 - uid: 1a39b2a6-7512-4f44-8a64-083df7e480f3 + resourceVersion: "204896" + uid: bb5d5443-0ac3-4a5b-863f-1d81717979bf spec: + drainSkip: true physicalFunctions: - bbDevConfig: acc100: @@ -606,7 +724,6 @@ following |vRAN| |FEC| accelerators: numQueueGroups: 4 maxQueueSize: 1024 numVfBundles: 1 - pfMode: false uplink4G: aqDepthLog2: 4 numAqsPerGroups: 16 @@ -615,13 +732,13 @@ following |vRAN| |FEC| accelerators: aqDepthLog2: 4 numAqsPerGroups: 16 numQueueGroups: 4 - pciAddress: "0000:17:00.0" + pciAddress: 0000:8a:00.0 pfDriver: vfio-pci vfAmount: 1 vfDriver: vfio-pci status: conditions: - - lastTransitionTime: "2022-09-29T20:33:13Z" + - lastTransitionTime: "2024-06-24T18:21:06Z" message: Configured successfully observedGeneration: 2 reason: Succeeded @@ -632,12 +749,155 @@ following |vRAN| |FEC| accelerators: - deviceID: 0d5c driver: vfio-pci maxVirtualFunctions: 16 - pciAddress: "0000:17:00.0" + pciAddress: 0000:8a:00.0 vendorID: "8086" virtualFunctions: - deviceID: 0d5d driver: vfio-pci - pciAddress: "0000:18:00.0" + pciAddress: 0000:8b:00.0 + pfBbConfVersion: v24.03-0-g1bbb3ac + + - An example of VRB1 status after applying 1 |VF| configuration. + + .. code-block:: none + + $ kubectl get sriovvrbnodeconfigs.sriovvrb.intel.com -n sriov-fec-system controller-0 -o yaml + apiVersion: sriovvrb.intel.com/v1 + kind: SriovVrbNodeConfig + metadata: + creationTimestamp: "2024-05-17T01:35:36Z" + generation: 2 + name: controller-0 + namespace: sriov-fec-system + resourceVersion: "9659405" + uid: 4db81a14-2ddf-4fc3-9f09-939ece5fd33a + spec: + drainSkip: true + physicalFunctions: + - bbDevConfig: + vrb1: + downlink4G: + aqDepthLog2: 4 + numAqsPerGroups: 16 + numQueueGroups: 0 + downlink5G: + aqDepthLog2: 4 + numAqsPerGroups: 16 + numQueueGroups: 4 + fftLut: + fftChecksum: "" + fftUrl: "" + maxQueueSize: 1024 + numVfBundles: 1 + qfft: + aqDepthLog2: 4 + numAqsPerGroups: 16 + numQueueGroups: 4 + uplink4G: + aqDepthLog2: 4 + numAqsPerGroups: 16 + numQueueGroups: 0 + uplink5G: + aqDepthLog2: 4 + numAqsPerGroups: 16 + numQueueGroups: 4 + pciAddress: 0000:f7:00.0 + pfDriver: vfio-pci + vfAmount: 1 + vfDriver: vfio-pci + status: + conditions: + - lastTransitionTime: "2024-06-27T22:35:50Z" + message: Configured successfully + observedGeneration: 2 + reason: Succeeded + status: "True" + type: Configured + inventory: + sriovAccelerators: + - deviceID: 57c0 + driver: vfio-pci + maxVirtualFunctions: 16 + pciAddress: 0000:f7:00.0 + vendorID: "8086" + virtualFunctions: + - deviceID: 57c1 + driver: vfio-pci + pciAddress: 0000:f7:00.1 + pfBbConfVersion: v24.03-0-g1bbb3ac + + - An example of VRB2 status after applying 1 |VF| configuration. + + .. 
code-block:: none + + $ kubectl get sriovvrbnodeconfigs.sriovvrb.intel.com -n sriov-fec-system controller-0 -o yaml + apiVersion: sriovvrb.intel.com/v1 + kind: SriovVrbNodeConfig + metadata: + creationTimestamp: "2024-06-26T20:32:51Z" + generation: 2 + name: controller-0 + namespace: sriov-fec-system + resourceVersion: "9400270" + uid: 31a7325e-d943-400b-aa14-2449d2d019c3 + spec: + drainSkip: true + physicalFunctions: + - bbDevConfig: + vrb2: + downlink4G: + aqDepthLog2: 5 + numAqsPerGroups: 64 + numQueueGroups: 0 + downlink5G: + aqDepthLog2: 5 + numAqsPerGroups: 64 + numQueueGroups: 8 + fftLut: + fftChecksum: "" + fftUrl: "" + maxQueueSize: 1024 + numVfBundles: 1 + qfft: + aqDepthLog2: 5 + numAqsPerGroups: 64 + numQueueGroups: 8 + qmld: + aqDepthLog2: 5 + numAqsPerGroups: 64 + numQueueGroups: 8 + uplink4G: + aqDepthLog2: 5 + numAqsPerGroups: 64 + numQueueGroups: 0 + uplink5G: + aqDepthLog2: 5 + numAqsPerGroups: 64 + numQueueGroups: 8 + pciAddress: "0000:07:00.0" + pfDriver: vfio-pci + vfAmount: 1 + vfDriver: vfio-pci + status: + conditions: + - lastTransitionTime: "2024-06-26T22:27:05Z" + message: Configured successfully + observedGeneration: 2 + reason: Succeeded + status: "True" + type: Configured + inventory: + sriovAccelerators: + - deviceID: 57c2 + driver: vfio-pci + maxVirtualFunctions: 64 + pciAddress: "0000:07:00.0" + vendorID: "8086" + virtualFunctions: + - deviceID: 57c3 + driver: vfio-pci + pciAddress: "0000:07:00.1" + pfBbConfVersion: v24.03-0-g1bbb3ac - An example of N3000 status after applying 2 |VFs| configuration. @@ -647,13 +907,12 @@ following |vRAN| |FEC| accelerators: apiVersion: sriovfec.intel.com/v2 kind: SriovFecNodeConfig metadata: - creationTimestamp: "2022-10-21T18:17:55Z" + creationTimestamp: "2024-06-26T23:18:46Z" generation: 2 name: controller-0 namespace: sriov-fec-system - resourceVersion: "2011601" - selfLink: /apis/sriovfec.intel.com/v2/namespaces/sriov-fec-system/sriovfecnodeconfigs/controller-0 - uid: 05db8606-8236-4efd-99bb-7b5ca20cd02e + resourceVersion: "1206023" + uid: 2946a968-aa5e-4bec-8ad7-1a3fca678c1b spec: drainSkip: true physicalFunctions: @@ -686,12 +945,12 @@ following |vRAN| |FEC| accelerators: vf6: 0 vf7: 0 pciAddress: 0000:1c:00.0 - pfDriver: pci-pf-stub + pfDriver: vfio-pci vfAmount: 2 vfDriver: vfio-pci status: conditions: - - lastTransitionTime: "2022-10-21T19:35:18Z" + - lastTransitionTime: "2024-06-26T23:22:54Z" message: Configured successfully observedGeneration: 2 reason: Succeeded @@ -700,7 +959,7 @@ following |vRAN| |FEC| accelerators: inventory: sriovAccelerators: - deviceID: 0d8f - driver: pci-pf-stub + driver: vfio-pci maxVirtualFunctions: 8 pciAddress: 0000:1c:00.0 vendorID: "8086" @@ -711,89 +970,42 @@ following |vRAN| |FEC| accelerators: - deviceID: 0d90 driver: vfio-pci pciAddress: 0000:1c:00.2 + pfBbConfVersion: v24.03-0-g1bbb3ac - - An example of ACC200 status after applying 1 |VF| configuration. +#. Modify |FEC| or |VRB| Cluster config. + + #. To further modify |FEC| or |VRB| device configuration, make desired + modifications to the sriov-fec/vrb custom resource file and re-apply. + + - ACC100 and N3000 + + .. code-block:: none + + $ kubectl apply -f .yaml + sriovfecclusterconfig.sriovfec.intel.com/config configured + + - VRB1 and VRB2 + + .. code-block:: none + + $ kubectl apply -f .yaml + sriovvrbclusterconfig.sriovvrb.intel.com/config configured + +#. Delete ``SriovFecClusterConfig`` or ``SriovVrbClusterConfig``. + + - ACC100 and N3000 .. 
code-block:: none
 
-      $ kubectl get sriovfecnodeconfigs.sriovfec.intel.com -n sriov-fec-system controller-0 -o yaml
-      apiVersion: sriovfec.intel.com/v2
-      kind: SriovFecNodeConfig
-      metadata:
-        creationTimestamp: "2022-10-21T18:31:41Z"
-        generation: 3
-        name: controller-0
-        namespace: sriov-fec-system
-        resourceVersion: "2159562"
-        selfLink: /apis/sriovfec.intel.com/v2/namespaces/sriov-fec-system/sriovfecnodeconfigs/controller-0
-        uid: e4e536fc-a777-4e26-974d-71226d43c4ed
-      spec:
-        drainSkip: true
-        physicalFunctions:
-        - bbDevConfig:
-            acc200:
-              downlink4G:
-                aqDepthLog2: 4
-                numAqsPerGroups: 16
-                numQueueGroups: 2
-              downlink5G:
-                aqDepthLog2: 4
-                numAqsPerGroups: 16
-                numQueueGroups: 4
-              maxQueueSize: 1024
-              numVfBundles: 1
-              qfft:
-                aqDepthLog2: 4
-                numAqsPerGroups: 16
-                numQueueGroups: 4
-              uplink4G:
-                aqDepthLog2: 4
-                numAqsPerGroups: 16
-                numQueueGroups: 2
-              uplink5G:
-                aqDepthLog2: 4
-                numAqsPerGroups: 16
-                numQueueGroups: 4
-          pciAddress: 0000:f7:00.0
-          pfDriver: vfio-pci
-          vfAmount: 1
-          vfDriver: vfio-pci
-      status:
-        conditions:
-        - lastTransitionTime: "2022-10-21T19:48:26Z"
-          message: Configured successfully
-          observedGeneration: 3
-          reason: Succeeded
-          status: "True"
-          type: Configured
-        inventory:
-          sriovAccelerators:
-          - deviceID: 57c0
-            driver: vfio-pci
-            maxVirtualFunctions: 16
-            pciAddress: 0000:f7:00.0
-            vendorID: "8086"
-            virtualFunctions:
-            - deviceID: 57c1
-              driver: vfio-pci
-              pciAddress: 0000:f7:00.1
+        $ kubectl delete -f .yaml
+        sriovfecclusterconfig.sriovfec.intel.com "config" deleted
 
-#. Modify |FEC| Cluster config.
-
-   #. To further modify |FEC| device configuration, make desired
-      modifications to the sriov-fec custom resource file and re-apply.
+   - VRB1 and VRB2
 
      .. code-block:: none
 
-         $ kubectl apply -f .yaml
-         sriovfecclusterconfig.sriovfec.intel.com/config configured
-
-#. Delete ``SriovFecClusterConfig``.
-
-   .. code-block:: none
-
-      $ kubectl delete -f .yaml
-      sriovfecclusterconfig.sriovfec.intel.com "config" deleted
+        $ kubectl delete -f .yaml
+        sriovvrbclusterconfig.sriovvrb.intel.com "config" deleted
 
 #. Configure |VFIO| for |PF| interface.
 
@@ -834,30 +1046,26 @@
 
    the application. You can get the token using the command
    :command:`system helm-override-show sriov-fec-operator sriov-fec-operator sriov-fec-system`.
 
-   - To configure ACC100, N3000 and ACC200 in vfio mode, you should provide
-     ``sriovFecClusterConfig`` with
-     ``spec.physicalFunction.pfDriver: vfio-pci``.
+   - To configure ACC100, N3000, VRB1, and VRB2 in vfio mode, you should
+     provide ``SriovFecClusterConfig`` or ``SriovVrbClusterConfig`` with
+     ``spec.physicalFunction.pfDriver: vfio-pci``.
 
-#. Switch between Static method configuration and Operator method.
+#. Switch from the static configuration method to the operator method.
 
-   #. Delete ``SriovFecClusterConfig``.
-
-   #. Remove ``sriov-fec-operator`` using the command
-      :command:`system application-remove`.
-
-   #. Apply the configuration using :command:`system host-device-modify`,
-      see :ref:`Enable ACC100/ACC200 Hardware Accelerators for Hosted vRAN Containerized Workloads `.
+   #. Delete the configuration that was applied by the static method:
+      :command:`system host-device-modify controller-0 pci_0000_f7_00_0 --driver igb_uio --vf-driver none -N 0`
 
 .. rubric:: |postreq|
 
-- See :ref:`Set Up Pods to Use SRIOV to Access ACC100/ACC200 HW Accelerators
+- See :ref:`Set Up Pods to Use SRIOV to Access HW Accelerators
   <set-up-pods-to-use-sriov>`. 
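 
+- Allocatable resources: Before adding the 'resources' request to a pod
+  spec, you can confirm that the configured |VFs| are advertised as
+  allocatable on the node. The check below uses the ACC100 resource name;
+  substitute the resource name that matches your device, and note that the
+  values shown are illustrative:
+
+  .. code-block:: none
+
+     $ kubectl get node controller-0 -o json | json_pp | grep -A 8 '"allocatable"'
+     "allocatable" : {
+        "cpu" : "126",
+        "ephemeral-storage" : "9417620260",
+        "hugepages-1Gi" : "10Gi",
+        "hugepages-2Mi" : "0",
+        "intel.com/intel_fec_acc100" : "16",
+        "memory" : "503822580Ki",
+        "pods" : "110"
+     },
 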
- Resource Request: The resource name for |FEC| |VFs| configured with |SRIOV| |FEC| operator must be ``intel.com/intel_fec_acc100`` for ACC100, - ``intel.com/intel_fec_5g`` for N3000 and ``intel.com/intel_fec_acc200`` for - ACC200 when requested in a pod spec unless the resource name was modified - using the `system helm-override-update` command. + ``intel.com/intel_fec_5g`` for N3000, ``intel.com/intel_fec_acc200`` for + VRB1 and ``intel.com/intel_vrb_vrb2`` for VRB2 when requested in a pod + spec unless the resource name was modified using the + `system helm-override-update` command. - Resource request for ACC100. @@ -869,6 +1077,26 @@ following |vRAN| |FEC| accelerators: limits: intel.com/intel_fec_acc100: '16' + - Resource request for VRB1. + + .. code-block:: none + + resources: + requests: + intel.com/intel_fec_acc200: '16' + limits: + intel.com/intel_fec_acc200: '16' + + - Resource request for VRB2. + + .. code-block:: none + + resources: + requests: + intel.com/intel_vrb_vrb2: '64' + limits: + intel.com/intel_vrb_vrb2: '64' + - Resource request for N3000. .. code-block:: none @@ -879,68 +1107,41 @@ following |vRAN| |FEC| accelerators: limits: intel.com/intel_fec_5g: '2' - - Resource request for ACC200. - - .. code-block:: none - - resources: - requests: - intel.com/intel_fec_acc200: '16' - limits: - intel.com/intel_fec_acc200: '16' - -- vfio-token: (in case of vfio mode) - - An application pod can get the |VFIO| token through a pod environment - variable. - - For example, reference the pod spec section for vfio token injection. - - .. code-block:: none - - env: - - name: SRIOV_FEC_VFIO_TOKEN - value: "02bddbbf-bbb0-4d79-886b-91bad3fbb510" - - .. note:: - - The application can get the existing vfio-token using the command below, - if the user updates the custom vfio-token. - :command:`system helm-override-show sriov-fec-operator sriov-fec-operator sriov-fec-system` - - If the vfio-token is available by default, it will not be displayed in the - output file. - - .. note:: - - Use the default vfio-token for testing purposes only. - - Run the following command once the application pod is ready to get the |PCI| - address of the allocated |FEC| device along with the |VFIO| token when - applicable. + address of the allocated |FEC| or |VRB| device along with the |VFIO| token + when applicable. - ACC100 .. code-block:: none - sysadmin@controller-0:~$ kubectl exec -ti app-pod -- env | grep FEC - PCIDEVICE_INTEL_COM_INTEL_FEC_ACC100=0000:32:00.0 - SRIOV_FEC_VFIO_TOKEN=02bddbbf-bbb0-4d79-886b-91bad3fbb510 + sysadmin@controller-0:~$ kubectl exec -ti app-pod -- env | grep PCI + PCIDEVICE_INTEL_COM_INTEL_FEC_ACC100=0000:8b:00.0 + PCIDEVICE_INTEL_COM_INTEL_FEC_ACC100_INFO={"0000:8b:00.0":{"extra":{"VFIO_TOKEN":"02bddbbf-bbb0-4d79-886b-91bad3fbb510"},"generic":{"deviceID":"0000:8b:00.0"},"vfio":{"mount":"/dev/vfio/vfio"}}} - - ACC200 + - VRB1 .. code-block:: none - sysadmin@controller-0:~$ kubectl exec -ti app-pod -- env | grep FEC - PCIDEVICE_INTEL_COM_INTEL_FEC_ACC200=0000:f7:00.0 - SRIOV_FEC_VFIO_TOKEN=02bddbbf-bbb0-4d79-886b-91bad3fbb510 + sysadmin@controller-0:~$ kubectl exec -ti app-pod -- env | grep PCI + PCIDEVICE_INTEL_COM_INTEL_FEC_ACC200_INFO={"0000:f7:00.1":{"extra":{"VFIO_TOKEN":"02bddbbf-bbb0-4d79-886b-91bad3fbb510"},"generic":{"deviceID":"0000:f7:00.1"},"vfio":{"mount":"/dev/vfio/vfio"}}} + PCIDEVICE_INTEL_COM_INTEL_FEC_ACC200=0000:f7:00.1 + + - VRB2 + + .. 
code-block:: none
+
+        sysadmin@controller-0:~$ kubectl exec -ti app-pod -- env | grep PCI
+        PCIDEVICE_INTEL_COM_INTEL_VRB_VRB2_INFO={"0000:07:00.1":{"extra":{"VFIO_TOKEN":"02bddbbf-bbb0-4d79-886b-91bad3fbb510"},"generic":{"deviceID":"0000:07:00.1"},"vfio":{"mount":"/dev/vfio/vfio"}}}
+        PCIDEVICE_INTEL_COM_INTEL_VRB_VRB2=0000:07:00.1
 
    - N3000
 
      .. code-block:: none
 
-        sysadmin@controller-0:~$ kubectl exec -ti app-pod -- env | grep FEC
-        PCIDEVICE_INTEL_COM_INTEL_FEC_5G=0000:1f:00.0
+        sysadmin@controller-0:~$ kubectl exec -ti app-pod -- env | grep PCI
+        PCIDEVICE_INTEL_COM_INTEL_FEC_5G_INFO={"0000:1c:00.1":{"extra":{"VFIO_TOKEN":"02bddbbf-bbb0-4d79-886b-91bad3fbb510"},"generic":{"deviceID":"0000:1c:00.1"},"vfio":{"mount":"/dev/vfio/vfio"}}}
+        PCIDEVICE_INTEL_COM_INTEL_FEC_5G=0000:1c:00.1
 
 - Applications that use |FEC| |VFs| when the |PF| interface is bound
   with the ``vfio-pci`` driver should provide the ``vfio-token`` to the |VF|.
 
   For example, a sample |DPDK| application can provide ``vfio-vf-token`` via
   Environment Abstraction Layer (EAL) parameters.
 
-   :command:`./test-bbdev.py -e="--vfio-vf-token=$SRIOV_FEC_VFIO_TOKEN -a$PCIDEVICE_INTEL_COM_INTEL_FEC_ACC200"`
+   :command:`./test-bbdev.py -e="--vfio-vf-token=02bddbbf-bbb0-4d79-886b-91bad3fbb510 -a$PCIDEVICE_INTEL_COM_INTEL_FEC_ACC200"`
diff --git a/doc/source/node_management/kubernetes/hardware_acceleration_devices/set-up-pods-to-use-sriov.rst b/doc/source/node_management/kubernetes/hardware_acceleration_devices/set-up-pods-to-use-sriov.rst
index 38bbb7610..8980a0809 100644
--- a/doc/source/node_management/kubernetes/hardware_acceleration_devices/set-up-pods-to-use-sriov.rst
+++ b/doc/source/node_management/kubernetes/hardware_acceleration_devices/set-up-pods-to-use-sriov.rst
@@ -2,28 +2,45 @@
 .. ggs1611608368857
 .. _set-up-pods-to-use-sriov:
 
-================================================================
-Set Up Pods to Use SRIOV to Access ACC100/ACC200 HW Accelerators
-================================================================
+==================================================
+Set Up Pods to Use SRIOV to Access HW Accelerators
+==================================================
 
-You can configure pods with |SRIOV| access to a ACC100/ACC200 devices by adding the
-appropriate 'resources' request in the pod specification.
+You can configure pods with |SRIOV| access to a hardware accelerator device
+by adding the appropriate 'resources' request in the pod specification.
 
 .. rubric:: |context|
 
 The following procedure shows an example of launching a container image with
-'resources' request for a |VF| to the ACC100/ACC200 devices.
+a 'resources' request for a |VF| on the ACC100 device.
 
 .. rubric:: |proc|
 
-#. Source the platform environment.
+#. Check the resource name and availability of the desired accelerator.
 
    .. code-block:: none
 
-      $ source /etc/platform/openrc ~(keystone_admin)$
+      ~(keystone_admin)$ kubectl get node controller-0 -o json | json_pp | grep -A 15 '"allocatable"'
+      "allocatable" : {
+         "cpu" : "126",
+         "ephemeral-storage" : "9417620260",
+         "hugepages-1Gi" : "10Gi",
+         "hugepages-2Mi" : "0",
+         "intel.com/intel_fec_acc100" : "16",
+         "memory" : "503822580Ki",
+         "pods" : "110"
+      },
+      "capacity" : {
+         "cpu" : "128",
+         "ephemeral-storage" : "10218772Ki",
+         "hugepages-1Gi" : "10Gi",
+         "hugepages-2Mi" : "0",
+         "intel.com/intel_fec_acc100" : "16",
+         "memory" : "525674740Ki",
 
-#. Create a pod.yml file that requests 16 ACC100/ACC200 VFs
-   (i.e. intel.com/intel_acc100_fec: '16')
+
+#. 
Create a pod.yml file that requests 16 ACC100 VFs + (i.e. intel.com/intel_fec_acc100: '16') .. code-block:: none @@ -64,12 +81,12 @@ The following procedure shows an example of launching a container image with resources: requests: memory: 4Gi - intel.com/intel_acc100_fec: '16' + intel.com/intel_fec_acc100: '16' windriver.com/isolcpus: 24 limits: hugepages-1Gi: 2Gi memory: 4Gi - intel.com/intel_acc100_fec: '16' + intel.com/intel_fec_acc100: '16' windriver.com/isolcpus: 24 volumes: - name: hugepage @@ -90,13 +107,13 @@ The following procedure shows an example of launching a container image with .. code-block:: none - ~(keystone_admin)$ kubectl exec -it pod0 -- bash echo - $PCIDEVICE_INTEL_COM_INTEL_ACC100_FEC + ~(keystone_admin)$ kubectl exec -it pod0 -- env | grep PCIDEVICE_INTEL_COM_INTEL_FEC_ACC100 - The following PCI addresses corresponding to the |SRIOVs| are displayed: + The following PCI addresses and "VFIO_TOKEN" corresponding to the |SRIOVs| are displayed: .. code-block:: none - 0000:86:01.1,0000:86:01.0,0000:86:01.7,0000:86:01.4,0000:86:00.3,0000:86:00.1,0000:86:00.5,0000:86:00.7,0000:86:00.2,0000:86:00.4,0000:86:01.5,0000:86:01.6,0000:86:01.2,0000:86:00.0,0000:86:00.6,0000:86:01.3 + PCIDEVICE_INTEL_COM_INTEL_FEC_ACC100=0000:32:00.5,0000:32:00.1,0000:32:00.2,0000:32:00.4,0000:32:01.0,0000:32:01.1,0000:32:01.2,0000:32:01.3,0000:32:00.0,0000:32:00.6,0000:32:00.7,0000:32:01.4,0000:32:01.7,0000:32:00.3,0000:32:01.6,0000:32:01.5 + PCIDEVICE_INTEL_COM_INTEL_FEC_ACC100_INFO={"0000:32:00.0":{"extra":{"VFIO_TOKEN":"02bddbbf-bbb0-4d79-886b-91bad3fbb510"},"generic":{"deviceID":"0000:32:00.0"},"vfio":{"dev-mount":"/dev/vfio/315","mount":"/dev/vfio/vfio"}},"0000:32:00.1":{"extra":{"VFIO_TOKEN":"02bddbbf-bbb0-4d79-886b-91bad3fbb510"},"generic":{"deviceID":"0000:32:00.1"},"vfio":{"dev-mount":"/dev/vfio/316","mount":"/dev/vfio/vfio"}},"0000:32:00.2":{"extra":{"VFIO_TOKEN":"02bddbbf-bbb0-4d79-886b-91bad3fbb510"},"generic":{"deviceID":"0000:32:00.2"},"vfio":{"mount":"/dev/vfio/vfio"}},"0000:32:00.3":{"extra":{"VFIO_TOKEN":"02bddbbf-bbb0-4d79-886b-91bad3fbb510"},"generic":{"deviceID":"0000:32:00.3"},"vfio":{"mount":"/dev/vfio/vfio"}},"0000:32:00.4":{"extra":{"VFIO_TOKEN":"02bddbbf-bbb0-4d79-886b-91bad3fbb510"},"generic":{"deviceID":"0000:32:00.4"},"vfio":{"mount":"/dev/vfio/vfio"}},"0000:32:00.5":{"extra":{"VFIO_TOKEN":"02bddbbf-bbb0-4d79-886b-91bad3fbb510"},"generic":{"deviceID":"0000:32:00.5"},"vfio":{"mount":"/dev/vfio/vfio"}},"0000:32:00.6":{"extra":{"VFIO_TOKEN":"02bddbbf-bbb0-4d79-886b-91bad3fbb510"},"generic":{"deviceID":"0000:32:00.6"},"vfio":{"mount":"/dev/vfio/vfio"}},"0000:32:00.7":{"extra":{"VFIO_TOKEN":"02bddbbf-bbb0-4d79-886b-91bad3fbb510"},"generic":{"deviceID":"0000:32:00.7"},"vfio":{"mount":"/dev/vfio/vfio"}},"0000:32:01.0":{"extra":{"VFIO_TOKEN":"02bddbbf-bbb0-4d79-886b-91bad3fbb510"},"generic":{"deviceID":"0000:32:01.0"},"vfio":{"mount":"/dev/vfio/vfio"}},"0000:32:01.1":{"extra":{"VFIO_TOKEN":"02bddbbf-bbb0-4d79-886b-91bad3fbb510"},"generic":{"deviceID":"0000:32:01.1"},"vfio":{"mount":"/dev/vfio/vfio"}},"0000:32:01.2":{"extra":{"VFIO_TOKEN":"02bddbbf-bbb0-4d79-886b-91bad3fbb510"},"generic":{"deviceID":"0000:32:01.2"},"vfio":{"mount":"/dev/vfio/vfio"}},"0000:32:01.3":{"extra":{"VFIO_TOKEN":"02bddbbf-bbb0-4d79-886b-91bad3fbb510"},"generic":{"deviceID":"0000:32:01.3"},"vfio":{"mount":"/dev/vfio/vfio"}},"0000:32:01.4":{"extra":{"VFIO_TOKEN":"02bddbbf-bbb0-4d79-886b-91bad3fbb510"},"generic":{"deviceID":"0000:32:01.4"},"vfio":{"mount":"/dev/vfio/vfio"}},"0000:32:01.5":{"extra":{"VFIO_TOKE
N":"02bddbbf-bbb0-4d79-886b-91bad3fbb510"},"generic":{"deviceID":"0000:32:01.5"},"vfio":{"mount":"/dev/vfio/vfio"}},"0000:32:01.6":{"extra":{"VFIO_TOKEN":"02bddbbf-bbb0-4d79-886b-91bad3fbb510"},"generic":{"deviceID":"0000:32:01.6"},"vfio":{"mount":"/dev/vfio/vfio"}},"0000:32:01.7":{"extra":{"VFIO_TOKEN":"02bddbbf-bbb0-4d79-886b-91bad3fbb510"},"generic":{"deviceID":"0000:32:01.7"},"vfio":{"mount":"/dev/vfio/vfio"}}} diff --git a/doc/source/sample_apps/flexran/deploy-flexran-2203-on-starlingx-1d1b15ecb16f.rst b/doc/source/sample_apps/flexran/deploy-flexran-2203-on-starlingx-1d1b15ecb16f.rst index 3c18665ef..14f0d0e38 100644 --- a/doc/source/sample_apps/flexran/deploy-flexran-2203-on-starlingx-1d1b15ecb16f.rst +++ b/doc/source/sample_apps/flexran/deploy-flexran-2203-on-starlingx-1d1b15ecb16f.rst @@ -142,7 +142,7 @@ platform: **Coyote Pass** (housing ICX-SP). system host-unlock $NODE #. After the system has been unlocked and available for the first time, - configure ACC100/ACC200 : + configure ACC100/VRB1 : .. code:: bash diff --git a/doc/source/shared/abbrevs.txt b/doc/source/shared/abbrevs.txt index 4656d5583..9a6ad8a8d 100755 --- a/doc/source/shared/abbrevs.txt +++ b/doc/source/shared/abbrevs.txt @@ -221,6 +221,7 @@ .. |VNIs| replace:: :abbr:`VNIs (VXLAN Network Interfaces)` .. |VPC| replace:: :abbr:`VPC (Virtual Port Channel)` .. |vRAN| replace:: :abbr:`vRAN (virtualized Radio Access Network)` +.. |VRB| replace:: :abbr:`VRB (vRAN Boost)` .. |VRF| replace:: :abbr:`VRF (Virtual Routing and Forwarding)` .. |VRFs| replace:: :abbr:`VRFs (Virtual Routing and Forwarding)` .. |VTEP| replace:: :abbr:`VTEP (Virtual Tunnel End Point)` @@ -228,4 +229,4 @@ .. |VXLANs| replace:: :abbr:`VXLANs (Virtual eXtensible Local Area Networks)` .. |WAD| replace:: :abbr:`WAD (Windows Active Directory)` .. |XML| replace:: :abbr:`XML (eXtensible Markup Language)` -.. |YAML| replace:: :abbr:`YAML (YAML Ain't Markup Language)` \ No newline at end of file +.. |YAML| replace:: :abbr:`YAML (YAML Ain't Markup Language)`