云代码 - Python code library

test

2019-10-16 Author: 云代码会员

[python] code library

import random
import time

import EnvSetup.EnvSetup
import Library.UFS.TestInstance as TestInstance
# import config.UFS
import os
import copy
import math
import csv
from Library.UFS.ufsDefines import *
from Library.Generic import timer
# Extra Imports
from collections import namedtuple
import collections


class Latency_00_Command_Base(TestInstance.TestInstance):
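    """
    Base class for UFS command-latency test cases. Test() drives the full flow:
    LUN reconfiguration, precondition (link/power setup plus a Test Unit Ready
    overhead loop), latency collection for the commands returned by
    _getVariables(), statistics (min/avg/median/std/percentiles) and CSV
    reporting. Child classes override _getVariables(), _preCondition() and
    _postCondition() as needed.
    """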
    SIZE_1KB = 1024
    SIZE_1MB = 1024 * SIZE_1KB
    SIZE_1GB = 1024 * SIZE_1MB
    LBA_SIZE = 4 * SIZE_1KB
    lbaStepSize = SIZE_1GB / LBA_SIZE
    DEBUG = False

    vcc = -1
    vccq = -1
    vccq2 = -1

    atariCategory = 'Latency'
    atariGroup = 'TSM'
    outputUnit = 'ms'
    pwrMode = 'Active'

    exitOnFailure = False
    verbose = False
    type = 'Scsi'

    def AddCommandArgs(self):
        super(Latency_00_Command_Base, self).AddCommandArgs()
        # configure device
        self.parser.add_argument('--mediaLunCount', type=int, default=1, help="""number of media LUNs to configure (default one)""")
        self.parser.add_argument('--bootLunCount', type=int, default=0, help="""number of boot LUNs to configure (default 0)""")
        self.parser.add_argument('--competitor', type=bool, default=False, help="""whether the device comes from a competitor. default False""")
        self.parser.add_argument('--tgtProvisionType', type=int, default=3, help="""provisioning type: 2 = discard (TPRZ=0), 3 = erase (TPRZ=1)""")

        # argument for precondition
        self.parser.add_argument('--seqTransferTL', type=int, default=0x80, help="""sequential write TL, default 512K""")
        self.parser.add_argument('--seqData', type=int, default=0xAA, help="""sequential Write Data""")
        self.parser.add_argument('--randNumber', type=int, default=100000, help="""random write cmd count per GB""")
        self.parser.add_argument('--randData', type=int, default=0x55, help="""random write data""")

        # argument for data collection and flow control
        self.parser.add_argument('--basicCmdNum', type=int, default=100000, help="""command count for each iteration/GB""")
        self.parser.add_argument('--totalCmdNum', type=int, default=100000, help="""Sample points needed for data collection""")

        # argument for debug function
        # self.parser.add_argument('--specialSaveRawData', type=bool, default=False, help="""For special format save raw data collection""")
        self.parser.add_argument('--vuLatencyEn', type=bool, default=False, help="""If using the latency VU, latency FW is required; prints data-profiling debug info.""")
        self.parser.add_argument('--dumpCache', type=bool, default=False, help="""If necessary, enable this: each time the cache fills, the script prints command info and VU data.""")
        self.parser.add_argument('--vuCMDTO', type=float, default=30.0, help="""Timeout for read/sync/other commands; if a command takes > 30 ms, it times out.""")
        self.parser.add_argument('--vuWriteTO', type=float, default=30.0, help="""Timeout for write only; if a command takes > 30 ms, it times out.""")
        self.parser.add_argument('--cacheCount', type=int, default=8, help="""how many commands to keep in the cache.""")

        # Others
        self.parser.add_argument('--defrageTable', type=bool, default=False, help="""whether to dirty the card's mapping table before measurement""")

    def _reconfigDevice(self, bSecureRemovalTypeSet=0, bProvisionTypeReset=3):
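        """
        Rewrite the Configuration Descriptor (IDN 0x01): set bSecureRemovalType,
        disable all logical units except LU0, give LU0 the maximum enhanced-area
        allocation units and set each unit's provisioning type to bProvisionTypeReset.
        """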
        if not self.competitor:
            self.vuLib.vuCmdtranslate.restoreWriteOnce()
        response = self.lun[0].Query.ReadDescriptorSync(0x1, 0, 0, 0x90)
        reconfigBuffer = response.DataSegmentBuffer
        if reconfigBuffer.GetByte(2) != 0:
            reconfigBuffer.SetByte(2, 0)
        reconfigBuffer.SetByte(7, bSecureRemovalTypeSet)
        self.lun[0].Query.WriteDescriptorSync(0x1, 0, 0, 0x90, reconfigBuffer)

        maxAllocUinit = self.lib.query.getdEnhanced1MaxNAllocU()
        HighByte = (int(maxAllocUinit) & 0xFF00) >> 8
        LowByte = int(maxAllocUinit) & 0xFF

        response = self.lun[0].Query.ReadDescriptorSync(1, 0, 0, 0x90)
        ReconfigBuf = response.DataSegmentBuffer

        for i in range(1, 9):
            ReconfigBuf.SetByte(i * 16 + 0xA, bProvisionTypeReset)
            ReconfigBuf.SetByte(i * 16, 0)
            ReconfigBuf.SetByte(i * 16 + 1, 0)
            ReconfigBuf.SetByte(i * 16 + 6, 0)
            ReconfigBuf.SetByte(i * 16 + 7, 0)

        ReconfigBuf.SetByte(0x16, HighByte)
        ReconfigBuf.SetByte(0x17, LowByte)
        ReconfigBuf.SetByte(0x10, 1)
        ReconfigBuf.SetByte(0x2, 0)

        self.lun[0].Query.WriteDescriptorSync(1, 0, 0, 0x90, ReconfigBuf)

    def _doBKOPs(self, defautTime=400):
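        """
        Wait for background operations to finish: poll bBackgroundOpStatus, issuing a
        small read and sleeping 60 s between polls, until the status drops to 0/1 or
        defautTime seconds elapse; fail the test if BKOPs are still pending.
        """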
        buf = self.mist.Buffer(0x1, LBA_SIZE)
        t = timer(defautTime)
        while not t.expired:
            if self.lib.query.getbBackgroundOpStatus() > 0:
                wExceptionEventStatus = self.lib.query.getwExceptionEventStatus()
                self.logger.info("send one read command")
                self.lun[self.lunID].Read10SimpleSync(0x1, 0x1, buf)
                self.logger.info("--------Get wExceptionEventStatus:{}".format(wExceptionEventStatus & 0x4))
                time.sleep(60)
            else:
                break
        currentBKOPs = self.lib.query.getbBackgroundOpStatus()
        if currentBKOPs == 0 or currentBKOPs == 1:
            self.logger.info("current BKOPs has been completed")
        else:
            self.testFailed('bBackgroundOpStatus is still {} after {}'.format(currentBKOPs, defautTime))

    def _newTargetConfg(self, bootOrNot=False):
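        """
        Create and write a new target configuration descriptor (mediaLunCount media
        LUNs, bootLunCount boot LUNs, one big LUN using all capacity); skipped for
        competitor devices.
        """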
        if not self.competitor:
            self.logger.info("########## Create new target configuration descriptor ##########")
            if bootOrNot:
                bootCapacity = 16
                self.lib.lunConfig.createTargetConfig(mediaLunCount=self.mediaLunCount, bootLunCount=self.bootLunCount, setBigLun=True,
                                                      useAllCapacity=True, setHighPriorityLun=False,
                                                      bProvisioningType=self.tgtProvisionType, bootCapacity_MB=bootCapacity)
            else:
                self.lib.lunConfig.createTargetConfig(mediaLunCount=self.mediaLunCount, bootLunCount=self.bootLunCount, setBigLun=True,
                                                      useAllCapacity=True, setHighPriorityLun=False, bProvisioningType=self.tgtProvisionType)
            self.lib.lunConfig.printDescriptor()  # Print the new created target descriptor

            disabledLun = self.lib.lunConfig.getDisableLunInTargetConfig()
            self.logger.info("Disabled LUs are: %s" % disabledLun)

            targetCfgOutline = self.lib.lunConfig.getTargetConfigOutline()
            for key, value in targetCfgOutline.items():
                self.logger.info("Category {} : {}".format(key, value))

            # Write the new target configuration descriptor to device
            self.lib.lunConfig.writeTargetConfigToDevice()
        else:
            self.logger.info("the unit comes from competitor")

    def _latencyOfPurge(self, purgeTimeOut=None):
        """
        Original purgeTimeOut=4000
        Purge all unmapped LBA(s)
        NOTE: This latency is in increments of 1ms

        :param purgeTimeOut: (int) in seconds the timeout period to wait for the purge command
        :return usPurgeTime: (float) command execution time in microseconds (us)
        """
        self.logger.info("***********************do purge*********************")
        if purgeTimeOut is None:
            purgeTimeOut = self.cfg.timeout.purgeTimeout
        # Only for the Leo product: extend the timeout so this case can pass.
        if self.product == "Leo":
            purgeTimeOut = 10000
        setPurgeResp = self.lib.query.setfPurgeEnable()
        if setPurgeResp == 1:
            purgeStartTime = time.time()
        else:
            self.testFailed(
                "Set fPurgeEnable flag enable fail, and the fPurgeEnable flag value is {}".format(setPurgeResp))

        bPurgeStatus = -1
        while bPurgeStatus not in (PURGE_STATUS_STOPPED_BY_HOST, PURGE_STATUS_COMPLETED):
            bPurgeStatus = self.lib.query.getbPurgeStatus()
            if time.time() - purgeStartTime > purgeTimeOut:
                self.testFailed("FAIL bPurgeStatus ({}) not set to {} before timeOut {}".format(setPurgeResp, (
                    PURGE_STATUS_STOPPED_BY_HOST, PURGE_STATUS_COMPLETED), purgeTimeOut))

        purgeEndTime = time.time()
        secondsPurgeTime = purgeEndTime - purgeStartTime
        usPurgeTime = self.lib.generic.changeUnits("us", "seconds", secondsPurgeTime)
        return usPurgeTime

    def _eraseFullCardPost(self):
        self.logger.info("pre/post condition: format unit and purge")
        lunList = self.lib.reportLuns.getAvailableNormalLuns()
        for lunID in lunList:
            self.lun[lunID].FormatUnitSimpleSync()
        self._latencyOfPurge()

    def _defragTableVB(self):
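        """
        Dirty the mapping table: from 12 random starting offsets, scatter single-block
        writes of pattern 0x55 every 1024 LBAs across the whole LUN.
        """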
        tl = 1
        buf = self.mist.Buffer(1, 8)
        maxLba = self.lun[self.lunID].ReadCapacity10Sync(buf).MaxLba
        writeBuffer = self.mist.Buffer(tl, LBA_SIZE)
        writeBuffer.Fill(0x55)
        for i in range(12):
            startLba = random.randint(0, 1020)
            while startLba < maxLba:
                if startLba + tl > maxLba:
                    startLba = maxLba - tl
                self.lun[self.lunID].Write10Sync(lba=startLba, count=tl, disablePageOut=False, fua=False, fuaNv=False, buf=writeBuffer)
                startLba = startLba + 1024

    def _sendWriteCMD(self, startLba, endLba, transferLen=1, writeData=0xAA, cmdNumber=0, seq=False, QD=32):
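        """
        Issue Write10 commands asynchronously at queue depth QD.
        seq=True: sequential writes of transferLen blocks from startLba to endLba.
        seq=False with cmdNumber: that many random writes inside [startLba, endLba].
        seq=False without cmdNumber: random writes until the whole range is written.
        """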
        outstandingCmd = {}
        responseList = []
        timeout = 60
        buffers = [self.mist.Buffer(transferLen, LBA_SIZE) for _ in range(QD)]
        buffTrk = range(QD)
        temp = 0
        if seq:
            self.logger.debug("sequential write from startLba=--{}-- to endLba=--{}--".format(startLba, endLba))
            while (startLba <= endLba):
                if startLba + transferLen > endLba:
                    tempTL = endLba - startLba + 1
                else:
                    tempTL = transferLen
                if len(outstandingCmd) < QD:
                    buffNum = buffTrk.pop()
                    buf = buffers[buffNum]
                    buf.Fill(writeData)
                    usr = self.lun[self.lunID].Write10Simple(startLba, tempTL, buf)
                    outstandingCmd[usr.SequenceId] = (buffNum)
                    if len(outstandingCmd) == QD:
                        startTime = time.time()
                        while len(responseList) == 0:
                            elapsedTime = time.time() - startTime
                            if elapsedTime > timeout:
                                break
                            try:
                                responseList = self.lun[self.lunID].CompletedResponses
                            except self.mist.ufs.UpiuCommandError as e:
                                self.testFailed("UpiuCommandError happened {}".format(e))
                            except Exception as e:
                                self.testFailed("Exception happened {}".format(e))
                        for resp in responseList:
                            sequenceId = resp.SequenceId
                            buffNum = outstandingCmd.pop(sequenceId)
                            buffTrk.append(buffNum)
                    responseList = []
                    startLba += tempTL
            if startLba == endLba + 1:
                if 0 < len(outstandingCmd) < QD:
                    for i in range(len(outstandingCmd)):
                        self.lun[self.lunID].WaitForOneResponse()
        else:
            if cmdNumber:
                self.logger.debug("random send cmdNumber={} write command in startLba={},endLba={}".format(cmdNumber, startLba, endLba))
                while (temp < cmdNumber):
                    if len(outstandingCmd) < QD:
                        buffNum = buffTrk.pop()
                        buf = buffers[buffNum]
                        buf.Fill(writeData)
                        tempLba = random.randint(startLba, endLba - transferLen + 1)
                        usr = self.lun[self.lunID].Write10Simple(tempLba, transferLen, buf)
                        temp = temp + 1
                        outstandingCmd[usr.SequenceId] = (buffNum)
                        if len(outstandingCmd) == QD:
                            startTime = time.time()
                            while len(responseList) == 0:
                                elapsedTime = time.time() - startTime
                                if (elapsedTime > timeout) and len(responseList) == 0:
                                    self.testFailed("Timeout occurred after %s,no responses returned " % elapsedTime)
                                    break
                                try:
                                    responseList = self.lun[self.lunID].CompletedResponses
                                except self.mist.ufs.UpiuCommandError as e:
                                    self.testFailed("UpiuCommandError happened {}".format(e))
                                except Exception as e:
                                    self.testFailed("Exception happened {}".format(e))
                            for resp in responseList:
                                sequenceId = resp.SequenceId
                                buffNum = outstandingCmd.pop(sequenceId)
                                buffTrk.append(buffNum)
                        responseList = []
                if temp == cmdNumber:
                    for i in range(len(outstandingCmd)):
                        self.lun[self.lunID].WaitForOneResponse()
            else:
                writeBuffer = self.mist.Buffer(transferLen, LBA_SIZE)
                writeBuffer.Fill(writeData)
                self.logger.debug("random send write command on current lbaRange until all LBA has been written")
                self.lib.writeReadAsync.writeRandom(self.lunID, "Write10", writeBuffer, startLba, endLba, transferLen, QD, fua=True)

    def _overwriteCli(self):
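        # Disable response logging and UPIU reporting (report failures only) and
        # skip the standard test-start sequence.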
        self.responseLoggerIsEnabled = False
        self.responseLoggerReportUpiu = False
        self.responseLoggerReportFailOnly = True
        self.needstandardTestStart = False

    def _preConditionForSAS(self, SSPreCondition=False):
        if self.DEBUG:
            self.logger.info('DEBUG running, precondition skipped')
            return
        self.logger.info("sequential write full card with ChunkSize *** 512KB ***, data is 0xAA")
        buff = self.mist.Buffer(1, 8)
        endLba = self.lun[self.lunID].ReadCapacity10Sync(buff).MaxLba
        self._sendWriteCMD(0, endLba, transferLen=self.seqTransferTL, seq=True)
        if not SSPreCondition:
            self.logger.info("============Random write 100000 per GB with pattern 0x55============")
            for startLba, endLba in self.LbaRange:
                self._sendWriteCMD(startLba, endLba, writeData=self.randData, cmdNumber=self.randNumber)
        else:
            self.logger.info("precondition for SS's test method")
            cmdNumber = self.maxLba / self.seqTransferTL
            self._sendWriteCMD(0, self.maxLba, transferLen=self.seqTransferTL, writeData=self.randData, cmdNumber=cmdNumber)
        if self.defrageTable:
            self._defragTableVB()

    def _preCondition(self, lunID, loopSize=1000):
        """
        Precondition steps that child classes can override.

        :param lunID: current lun
        :return oldValues: (dict) the values before this method changed them
        :return avgTeUnRe: (float) average test unit ready time
        :return secIdleBoundary: (float) time to put device in idle mode in seconds
        """
        newValues = {}
        if hasattr(self, 'newWce') and self.newWce >= 0:
            newValues["newWce"] = self.newWce
        if hasattr(self, 'newRcd') and self.newRcd >= 0:
            newValues["newRcd"] = self.newRcd
        if hasattr(self, 'gearHS') and self.gearHS >= 0:
            newValues["gearHS"] = self.gearHS
        if hasattr(self, 'rateHS') and self.rateHS != "None":
            newValues["rateHS"] = self.rateHS
        if hasattr(self, 'ufsPwrMode') and self.ufsPwrMode >= 0:
            newValues["ufsPwrMode"] = self.ufsPwrMode
        if hasattr(self, 'laneNum') and self.laneNum >= 0:
            newValues["laneNum"] = self.laneNum
        if hasattr(self, 'bDataReliability') and self.bDataReliability >= 0:
            newValues["bDataReliability"] = self.bDataReliability

        newValues["newVoltages"] = self.lib.performanceAdHoc.getVoltages(
            vcc=self.vcc, vccq=self.vccq, vccq2=self.vccq2)

        oldValues = self.lib.performanceAdHoc.latencyPreCondition(lunID=lunID, newValues=dict(newValues), verbose=False,
                                                                  exitOnFailure=self.exitOnFailure)
        self.logger.parameter(paramName="newValues", value=newValues, category=self.atariCategory, group=self.atariGroup)
        self.logger.parameter(paramName="oldValues", value=oldValues, category=self.atariCategory, group=self.atariGroup)

        (usAvgTeUnRe, _, _) = self.lib.performanceAdHoc.testUnitReadyLoopAndPrint(lunID=lunID, loopSize=loopSize, verbose=False)
        avgTeUnRe = self.lib.generic.changeUnits(self.outputUnit, "us", usAvgTeUnRe)
        self.logger.parameter(paramName="Test Unit Ready Average", value=avgTeUnRe, unit=self.outputUnit, category=self.atariCategory, group=self.atariGroup)

        if self.pwrMode == "Idle":
            (_, rawBoundary, rawUnit) = self.cfg.latency.getLatencyExpected("Typical", "Active to Idle")
            idleBoundary = self.lib.generic.changeUnits(self.outputUnit, rawUnit, rawBoundary)
        else:
            idleBoundary = None

        preCondition = namedtuple("preCondition", "oldValues avgTeUnRe secIdleBoundary")
        return preCondition(oldValues=oldValues, avgTeUnRe=avgTeUnRe, secIdleBoundary=idleBoundary)

    def _dumpCacheData(self):
        self.logger.info("===================Dump CMD/VU Buffer=======================")
        self.logger.info("NOTE: CMD Info Format: (cmdType, lun, lba, transferLen, taskTag, sequenceId, exec(ms)")
        self.logger.info("Debug. cache cmd count:{}".format(len(self.vuCacheDict)))
        for cmdKey, vuValue in self.vuCacheDict.items():
            tempList = []
            tempList.append(cmdKey)
            self.logger.info("cmdKey(ms):{}".format(cmdKey))
            self.logger.info("vuValue:{}".format(vuValue))
            for index, tempValue in enumerate(vuValue):
                tempList.append(tempValue)
        self._saveRawData(["TimeOut_VU_Get_Latency"], [tempList])
        # if the whole cache data are dumped, clear cache.
        self.vuCacheDict.clear()

    def _vuLatency(self, cmdInfo, checkTO=True):
        # cmdInfo = (cmdType, lun, lba, transferLen, taskTag, sequenceId, execTime)
        cmdType = cmdInfo[0]
        execTime = cmdInfo[-1]
        if self.cacheCount:
            if len(self.vuCacheDict) >= self.cacheCount:
                if self.dumpCache:
                    self._dumpCacheData()
                else:
                    # if not dumping, pop the oldest cached item to make room for the next cmd info.
                    self.vuCacheDict.popitem(last=False)

            vuData = self.vuLib.vuCmdtranslate.getProfileDataLatency()
            self.vuCacheDict[cmdInfo] = vuData

        # Check whether the command timed out; if yes, fail the test immediately.
        if cmdType == 'Mode Sense':
            self.vuLatencyTO = 1
        if cmdType == "Mode Select":
            self.vuLatencyTO = 100
        self.logger.info("111----set command timeout {}ms for cmdType {}".format(self.vuLatencyTO, cmdType))

        if checkTO and execTime >= self.vuLatencyTO:
            if self.cacheCount:
                self._dumpCacheData()
            try:
                vuData = self.vuLib.vuCmdtranslate.getProfileDataLatency()
                self.logger.info("Oops! Occurred cmd time out!, TargetTO:{} ms".format(self.vuLatencyTO))
                self.logger.info("NOTE: CMD Info Format: (cmdType, lun, lba, transferLen, taskTag, sequenceId, exec(ms)")
                self.logger.info("CMD Info(ms):{}, ".format(cmdInfo))
                self.testFailed("GetProfileData:{}".format(vuData))
            except self.mist.ufs.UpiuCommandError as e:
                self.logger.info("Get profileData error:{}".format(e))

    def _postCondition(self, lunID, oldValues):
        """
        Postcondition steps that child classes can override.

        :param lunID:  current lun
        :param oldValues: (dict) the values to set the device back to at the end of the test
        """
        self.lib.performanceAdHoc.latencyPostCondition(lunID=lunID, oldValues=oldValues)

    def _afterLoopBeforeAveraging(self, usLatencies):
        """
        Some scripts need to modify the latencies before proceeding with the test.

        :param usLatencies: [int] The latencies from the main function in microseconds
        :returns latencies: [int] The modified latencies
        """
        latencies = self.lib.generic.changeUnits(self.outputUnit, "us", usLatencies)

        return latencies

    def _splitdrive(self, p):
        """Split a pathname into drive and path specifiers. Returns a 2-tuple
    "(drive,path)";  either part may be empty"""
        if p[1:2] == ':':
            return p[0:2], p[2:]
        return '', p

    def split(self, p):
        """Split a pathname.

        Return tuple (head, tail) where tail is everything after the final slash.
        Either part may be empty."""

        d, p = self._splitdrive(p)
        # set i to index beyond p's last slash
        i = len(p)
        while i and p[i - 1] not in '/\\':
            i = i - 1
        head, tail = p[:i], p[i:]  # now tail has no slashes
        # remove trailing slashes from head, unless it's all slashes
        head2 = head
        while head2 and head2[-1] in '/\\':
            head2 = head2[:-1]
        head = head2 or head
        return d + head, tail

    def _openNewFile(self, fName):
        # fName optionally names a new csv file (without the .csv extension)
        if fName:
            tempCsv = fName + "_Latencies_data.csv"
        else:
            tempCsv = "Command_Latencies_data.csv"
        if self.logFilePath.endswith('.log'):
            subPath2, _ = self.split(self.logFilePath)
            subPath1, _ = self.split(subPath2)
            filePath, _ = self.split(subPath1)
        else:
            filePath = self.logFilePath
        fileName = os.path.join(filePath, tempCsv)
        if os.path.exists(fileName):
            self.logger.info("-------file exist--------")
        return fileName

    def _calSTD(self, sortedRawList=None, avgTime=0):
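        """
        Sample standard deviation with Bessel's correction:
        s = sqrt( sum((x_i - avgTime)^2) / (n - 1) ), returning 0 for a single sample.
        """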
        n = len(sortedRawList)
        if n == 1:
            return 0
        tempList = [pow((item - avgTime), 2) for item in sortedRawList]
        stdDeviation = math.sqrt(sum(tempList) / float(n - 1))
        return stdDeviation

    def classOfNines(self, data, outliers=100):
        """
        For quality of service (QoS) we usually need to report not only the maximum but also the 99th-percentile element
        :param data:        All data points collected, probably latencies
        :type data:         list
        :param outliers:    Have at least this many outliers before calculating that class of nines
        :type outliers:     int
        :return results:    The class of nines results
        :rtype results:     dict

        Example
        results = classOfNines(range(1, 10000000+1))
        print [(key, results[key]) for key in sorted(results.keys())]
        """
        data.sort()
        results = {}
        nines = [0.99, 0.999, 0.9999, 0.99999, 0.999999, 0.9999999, 0.99999999, 0.999999999, 0.9999999999]
        n = 0
        listLen = len(data)
        while True:
            if not listLen / 10 ** (n + 2) >= outliers:
                break
            index = int(nines[n] * listLen) - 1
            results[nines[n]] = data[index]
            n += 1
        return results

    def _calAllData(self, cmdType, cmdName, rawData, sampleNumber=None):
        """
        Statistics of raw latency data
        :param cmdType: Category of the operation, eg, QoS, SCSI, UPIU
        :param cmdName: Details of the cmd, eg, 4K read dirty
        :param rawData: raw latency data
        :param sampleNumber: the count of data points collected
        :return: statistics data list, eg, [['QoS', '4K read dirty', 'Min', 20], ['QoS', '4K read dirty', 'Max', 30]...]
        """
        rawDataList = copy.deepcopy(rawData)

        if not sampleNumber:
            sampleNumber = len(rawData)

        rawDataList.sort()
        if len(rawDataList) == 1:
            avgTime = rawDataList[0]
            maxTime = rawDataList[0]
            minTime = rawDataList[0]
            median = rawDataList[0]
            stdDeviation = 0  # standard deviation
        else:
            avgTime = float(sum(rawDataList)) / len(rawDataList)
            maxTime = max(rawDataList)
            minTime = min(rawDataList)
            sortedTimes = sorted(rawDataList)
            if (len(sortedTimes) == 0):
                median = 0
            else:
                if (len(sortedTimes) % 2) == 1:
                    median = sortedTimes[len(sortedTimes) / 2]
                else:
                    median = (sortedTimes[(len(sortedTimes) / 2) - 1] + sortedTimes[(len(sortedTimes) / 2)]) / 2
            stdDeviation = self._calSTD(rawDataList, avgTime)
        keys = []
        values = []
        countNum = 0
        nines = self.classOfNines(rawDataList)
        nines1 = sorted(nines.iteritems(), key=lambda d: d[1], reverse=False)
        if len(rawDataList) < 10000:
            values = ["NA", "NA", "NA"]
        else:
            for key, value in nines1:
                keys.append(key)
                values.append(value)
                countNum = countNum + 1
                if countNum == 3:
                    break
            if len(keys) == 1:
                keys.extend(["NA","NA"])
                values.extend(["NA","NA"])
            if len(keys) == 2:
                keys.append("NA")
                values.append("NA")
        allData = [
            [cmdType] + [cmdName] + ['Min'] + [minTime],
            [cmdType] + [cmdName] + ['Avg'] + [avgTime],
            [cmdType] + [cmdName] + ['Median'] + [median],
            [cmdType] + [cmdName] + ['Std Dev'] + [stdDeviation],
            [cmdType] + [cmdName] + ['99%'] + [values[0]],
            [cmdType] + [cmdName] + ['99.9%'] + [values[1]],
            [cmdType] + [cmdName] + ['99.99%'] + [values[2]],
            [cmdType] + [cmdName] + ['Max'] + [maxTime],
            [cmdType] + [cmdName] + ['sampleNumber'] + [sampleNumber]
        ]
        # allData = [cmdType] + [CMD] + [minTime, avgTime, median, stdDeviation] + values + [maxTime, sampleNumber]
        self.logger.info("-----------------------------------------------------------------------------------------------------------")
        self.logger.info("{:<16} {:<48} {:<8} {:<16} {:<8} {:<16} {:<8} {:<8} {:<8} {:<8} {:<8}".
                         format("cmdType", "CMD", "MinTime", "AVERAGE", "Median", "STD", "99%", "99.9%", "99.99%",
                                "MaxTime", "sampleNumber"))
        self.logger.info("{:<16} {:<48} {:<8} {:<16} {:<8} {:<16} {:<8} {:<8} {:<8} {:<8} {:<8}".
                         format(cmdType, cmdName, minTime, avgTime, median, stdDeviation, values[0],
                                values[1], values[2], maxTime, sampleNumber))
        self.logger.info("------------------------------------------------------------------------------------------------------------")
        return allData

    def _handleLatencies(self, allData, fName=None):
        # allData---type: list of statistics rows, or int when there is no raw data
        # fName---type: str, new csv file name without '.csv'; default: "Command_Latencies_data"

        fileName = self._openNewFile(fName)
        wrFileHdlr = open(fileName, "ab+")
        wt = csv.writer(wrFileHdlr)
        for i in xrange(len(allData)):
            data = allData[i]
            wt.writerow(data)
        wrFileHdlr.close()

    def _saveRawData(self, nameList, rawDataList):
        """
        Save raw latency data to csv file
        :param nameList: a list which contains the cmd names, used in csv file naming
        :param rawDataList: raw data corresponding to nameList
        :return: None
        """
        for i in range(len(nameList)):
            if self.logFilePath.endswith('.log'):
                rawDataFile = self.logFilePath.rstrip('.log') + "_" + nameList[i] + "_raw_data.csv"
            else:
                rawDataFile = self.logFilePath + "/" + self.logFileName[:-4]  + "_" + nameList[i] + "_raw_data.csv"
            wrFileHdlr = open(rawDataFile, "a+")
            for execTime in rawDataList[i]:
                wrFileHdlr.write("{}\n".format(execTime))
            wrFileHdlr.close()

    def _rawDataFileName(self, name):
        """
        Get target raw data file, related to func _saveRawData()
        :param name: cmd name
        :return: absolute raw data file path
        """

        if self.logFilePath.endswith('.log'):
            rawDataFile = self.logFilePath.rstrip('.log') + "_" + name + "_raw_data.csv"
        else:
            rawDataFile = self.logFilePath + "/" + self.logFileName[:-4] + "_" + name + "_raw_data.csv"
        return rawDataFile

    def _nandTemperaturePrint(self):
        """Print the current NAND temperature in Celsius
        """
        temp = self.lib.vuHighLevelLib.getNandTemperature()
        self.logger.info(temp)


    def _generateCommandIndex(self, startLba=0, endLba=0, chunkSize=1, randCount=100000, percentage=50):
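        """
        Build a list of non-overlapping [startLba, endLba] ranges of length chunkSize.
        For chunkSize == 1 plain random sampling is used; otherwise the recursive
        randrange() helper picks random, non-overlapping start LBAs. The number of
        ranges is capped so at most `percentage` percent of the user data area is covered.
        """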
        # random address list without overlap
        def randrange(start, end, size, randCount):
            if ((end - start) < (size - 1)) or (len(lbaList) == randCount):
                return 0
            lba = random.randint(start, end - size + 1)
            lbaList.append(lba)
            randrange(start, lba - 1, size, randCount)
            randrange(lba + size, end, size, randCount)

        # Make sure to keep more than 50% of the UDA after unmap; if necessary, the test should be rerun to collect 100,000 samples
        UDAPerct = (endLba - startLba + 1) * percentage / 100
        gpNum = UDAPerct / chunkSize
        sampleCnt = gpNum if gpNum < randCount else randCount
        if chunkSize == 1:
            cmdIndex = [[tempLba, tempLba + chunkSize - 1] for tempLba in
                        random.sample(xrange(startLba, endLba + 1 - chunkSize + 1), sampleCnt)]
        else:
            lbaList = []
            randrange(startLba, endLba, chunkSize, randCount)
            cmdIndex = [[tempLba, tempLba + chunkSize - 1] for tempLba in random.sample(lbaList, sampleCnt)]

        return cmdIndex

    def _getUnmapTimeQD(self, unmapCMDIndex=None, unmapSize=0x1, queueDepth=8):
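        """
        Send an Unmap command for each entry in unmapCMDIndex at queue depth
        `queueDepth`, collect every command's execution time (ExecTime converted to
        milliseconds) and return the list of latencies.
        """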
        responseList = []
        unmapLatencies = []
        outstandingCmd = {}
        outstandingCmdTotalCnt = 0
        while (unmapCMDIndex):
            if (len(outstandingCmd) < queueDepth):
                currentCmd = unmapCMDIndex.pop(0)
                startLba = currentCmd[0]
                usr = self.lun[self.lunID].UnmapSimple(startLba, unmapSize)
                outstandingCmdTotalCnt += 1
                outstandingCmd[usr.SequenceId] = ('Unmap', 0, currentCmd[0], unmapSize)
            else:
                t = timer(30)
                while (not t.expired) and (len(responseList) == 0):
                    try:
                        responseList = self.lun[self.lunID].CompletedResponses
                    except Exception as e:
                        self.testFailed("Exception occurred: %s" % e)
                if t.expired:
                    self.testFailed("Timeout occurred after %s,no responses returned " % t)

                for resp in responseList:
                    execTime = float(resp.ExecTime * 10 ** -3)
                    sequenceId = resp.SequenceId
                    unmapLatencies.append(execTime)
                    outstandingCmd.pop(sequenceId)
                    # cmdType, buffNum, randLba, tranfLen = outstandingCmd.pop(sequenceId)

                    # # For fw request, add vu to get command's info
                    # if self.vuLatencyEn and not self.competitor:
                    #     cmdInfo = (cmdType, self.lunID, randLba, tranfLen, resp.TaskTag, sequenceId, execTime)
                    #     self._vuLatency(cmdInfo)

                responseList = []

        while len(outstandingCmd):
            try:
                responseList = self.lun[self.lunID].WaitForAllResponses()
            except Exception as e:
                self.testFailed("Exception occurred: %s" % e)

            for resp in responseList:
                execTime = float(resp.ExecTime * 10 ** -3)
                sequenceId = resp.SequenceId
                unmapLatencies.append(execTime)
                cmdType, buffNum, randLba, tranfLen = outstandingCmd.pop(sequenceId)
                # For fw request, add vu to get command's info
                #     if self.vuLatencyEn and not self.competitor:
                #         cmdInfo = (cmdType, self.lunID, randLba, tranfLen, resp.TaskTag, sequenceId, execTime)
                #         self._vuLatency(cmdInfo)
                #
                # # If dump the remaining cache data.
                # if self.vuLatencyEn and not self.competitor and self.dumpCache and len(self.vuCacheDict):
                #     self.logger.info("The last vcCacheDice length:{}".format(len(self.vuCacheDict)))
                #     self._dumpCacheData()
        return unmapLatencies

    def _collectUnmapLatencies(self, unmapSize=0x1, QD=8, totalCnt=100000, maxUnmapPert=100):
        unmapLatenciesList = []
        unmapCMDIndex = self._generateCommandIndex(0, self.maxLba, chunkSize=unmapSize, randCount=totalCnt,
                                                   percentage=maxUnmapPert)
        unmapLatenciesList.extend(self._getUnmapTimeQD(unmapCMDIndex, unmapSize, QD))
        while (len(unmapLatenciesList) < totalCnt):
            self.logger.info('{} sampling points collected, switch to next iteration'.format(len(unmapLatenciesList)))
            self._preConditionForSAS()
            unmapCMDIndex = self._generateCommandIndex(0, self.maxLba, chunkSize=unmapSize,
                                                       randCount=(totalCnt - len(unmapLatenciesList)))
            unmapLatenciesList.extend(self._getUnmapTimeQD(unmapCMDIndex, unmapSize, QD))
        return unmapLatenciesList

    def _getVariables(self, lunID):
        """
        Return the variables needed for this script to run. This method is overridden by child classes.

        :param lunID: (int) Current LUN
        :returns cmdCalls ([functions]) The commands that the latencies are to be taken from
        :returns cmdNames ([str]) Names of the commands in cmdCalls. Set to None if you don't want latencies from that command.
        :returns isSequential (bool) When looping through should commands be run in order like 111122223333 (True) or 123123123123 (False)
        """
        def buffer_():
            return self.mist.Buffer(sectorCount=1, bytesPerSector=LBA_SIZE)

        def inquiry():
            return self.lun[lunID].InquirySync(enableVitalProductData=True, pageCode=0, buf=buffer_())

        cmdCalls = [inquiry]
        cmdNames = ["Inquiry"]
        isSequential = True

        getVariables = namedtuple("getVariables", "cmdCalls cmdNames isSequential")
        return getVariables(cmdCalls=cmdCalls, cmdNames=cmdNames, isSequential=isSequential)

    def _enterAndExitH8(self, delay=0):
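        """
        Enter Hibern8 via DME, optionally wait `delay` ms (or measure VCCQ/VCCQ2
        current when delay is 0), then exit Hibern8 and record the exit latency.
        """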
        H8_Exit_lat = []
        respHibern8Enter = self.lib.dme.hibernateEnter()
        if (respHibern8Enter != 0):
            self.addFailure("Hibern8 enter failed. status is {}  ".format(respHibern8Enter))

        self.logger.info('Step2.1-----Wait {} ms, and monitor Iccq2---'.format(delay))
        time.sleep(delay * 1.0 / 1000)
        # self.lib.powerMeasurement.measureVccCurrent(1000, delay=1)

        if delay == 0:
            one_million_samples = 1 * 1000 * 1000
            self.lib.powerMeasurement.measureVccqVccq2Current(samples=2 * one_million_samples, sampleRate=250, test="Idle",
                                                              temperature="25C", hibernate=True)

        self.logger.info("Step2.2-----Exit hibernate -------")

        response = self.lun[self.lun.keys()[0]].Dme.HibernateExit()
        if ((response.Argument2 & 0xFF) != 0):
            self.addFailure("Hibern8 Exit DME command failed. error code is {}  ".format(response.Argument2))
        else:
            self.logger.info("exec time of exit hibernate is {}us".format(response.ExecTime))
            H8_Exit_lat.append(response.ExecTime)
        return H8_Exit_lat

    def getBigLunId(self):
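        """
        Find the LUN with the largest capacity and return (bigLun, maxLbaBigLun,
        LbaRange, maxUnmapLbaSize), where LbaRange is a list of (startLba, endLba)
        pairs covering the capacity in 1 GB steps.
        """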
        lunList = self.lib.reportLuns.getAvailableNormalLuns()
        bigLun = 0
        maxLbaBigLun = 0
        for lunId in lunList:
            buf = self.mist.Buffer(1, 8)
            tempLba = self.lun[lunId].ReadCapacity10Sync(buf).MaxLba
            if tempLba > maxLbaBigLun:
                bigLun = lunId
                maxLbaBigLun = tempLba
        maxUnmapLbaSize = self.lib.unmapAsync._getMaxUnmapLbaSize(bigLun)
        intCapacityGB = maxLbaBigLun / self.lbaStepSize
        maxLba = intCapacityGB * self.lbaStepSize
        lbaMainStart = 0
        LbaRange = [(startLba, endLba) for startLba, endLba in
                    zip(range(lbaMainStart, maxLba - self.lbaStepSize, self.lbaStepSize), range(lbaMainStart + self.lbaStepSize - 1, maxLba, self.lbaStepSize))]
        return bigLun, maxLbaBigLun, LbaRange, maxUnmapLbaSize

    def getLatencies(self, cmdCalls, isSequential, secIdleBoundary):
        usLatencies = self.lib.performanceAdHoc.loopCmd(
            command=cmdCalls, loopSize=self.totalCmdNum, sequential=isSequential, idleSleep=secIdleBoundary, verbose=self.verbose)
        return usLatencies

    def Test(self):
        """
        This is the main test function of the TC Script.
        """
        self._eraseFullCardPost()
        self.logger.info("<Step1> PRECONDITION: Lun config")
        self._newTargetConfg()
        self.lunID, self.maxLba, self.LbaRange, self.maxUnmapLbaSize = self.getBigLunId()
        self.vuCacheDict = collections.OrderedDict()
        self.vuLatencyTO = self.vuCMDTO
        self.logger.info("Lun {}".format(self.lunID))

        # ####### VARIABLES ####### #
        cmdCalls, cmdNames, isSequential = self._getVariables(self.lunID)
        self.cmdNames = cmdNames

        # ####### PRECONDITION ####### #
        self.logger.info("<Step2> Send 1000 * TUR to measure host overhead ")
        oldValues, avgTeUnRe, idleBoundary = self._preCondition(lunID=self.lunID)
        secIdleBoundary = self.lib.generic.changeUnits("seconds", self.outputUnit, idleBoundary)

        # ####### MAIN ####### #
        self.logger.info("<Step3> Send {} * {} commands to measure latency".format(self.totalCmdNum, cmdNames))
        usLatencies = self.getLatencies(cmdCalls, isSequential, secIdleBoundary)

        latencies = self._afterLoopBeforeAveraging(usLatencies=usLatencies)

        self._saveRawData(self.cmdNames, latencies)
        # make sure to use len(latencies) in case of duplicated cmdNames
        for i in xrange(len(latencies)):
            allData = self._calAllData(self.type, cmdNames[i], latencies[i])
            self._handleLatencies(allData, self.logFileName[:-4])

        self.logger.debug("POSTCONDITION")
        self._postCondition(lunID=self.lunID, oldValues=oldValues)
        self._eraseFullCardPost()


def Main():
    test = Latency_00_Command_Base()
    exit(test.Run())

if __name__ == "__main__":
    Main()
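
Usage note: a child test typically only overrides _getVariables() to choose which command gets timed. Below is a minimal, hypothetical sketch of such a child class; the class name, command name and the choice of Read10 at LBA 0 are illustrative, not part of the original library:

class Latency_01_Read4K_Example(Latency_00_Command_Base):
    """Hypothetical child class: time single-block Read10 commands instead of Inquiry."""

    def _getVariables(self, lunID):
        def buffer_():
            return self.mist.Buffer(sectorCount=1, bytesPerSector=LBA_SIZE)

        def read4k():
            # Illustrative timed command: one 4K Read10 at LBA 0.
            return self.lun[lunID].Read10SimpleSync(0x0, 0x1, buffer_())

        getVariables = namedtuple("getVariables", "cmdCalls cmdNames isSequential")
        return getVariables(cmdCalls=[read4k], cmdNames=["Read10 4K"], isSequential=True)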
