Work log: project base library file BaseFun.py

#!/usr/bin/env python
# coding=utf-8

import os
import datetime
import hmac
import base64
import time
import requests
import filecmp
import HTMLTestRunner
import urllib2
import hashlib
import json
import re
import redis
import psycopg2
import subprocess
import collections
import binascii
import urllib
from json import *
from hashlib import sha1
from requests_toolbelt import MultipartEncoder
import xml.dom.minidom
from xml.etree import ElementTree
import inspect
from IPy import IP

try:
    import xml.etree.cElementTree as ET
except ImportError:
    import xml.etree.ElementTree as ET
curDir = os.path.abspath('.') + os.path.sep
filePath = curDir

try:
    from cStringIO import StringIO as BytesIO  # py2

    bytes_chr = chr
except ImportError:
    from io import BytesIO  # py3

    bytes_chr = lambda c: bytes([c])

CHUNK_BITS = 22
CHUNK_SIZE = 1 << CHUNK_BITS  # == 2 ** 22 == 4 * 1024 * 1024 == 4MiB

ONE_MB = 1024 * 1024  # size of 1 MB

# Get the name of the currently executing function
def get_current_function_name():
    return inspect.stack()[1][3]

# Expand an IP with a subnet mask, e.g. 59.61.78.142/28, into the concrete list of addresses in that network
def decide_ip_subnetmask(ip_address):
    ip_list = []
    ip = IP(ip_address, make_net=1)
    for i in ip:
        ip_list.append(str(i))
    return ip_list
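
# Usage sketch (illustrative values, not part of the library): a /28 network
# expands to its 16 addresses, e.g.
#   decide_ip_subnetmask('59.61.78.142/28')
#   -> ['59.61.78.128', '59.61.78.129', ..., '59.61.78.143']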

# Get every IP between the start IP and the given last octet (inclusive); returns a list
def decide_ip_somerange(ip1, lastnum):
    startnum = ip1.split('.')[-1]
    bf3 = '.'.join(ip1.split('.')[:3])
    ip_list = []
    if int(startnum) > int(lastnum):
        return None
    for last in range(int(startnum), int(lastnum) + 1):
        ip_list.append(bf3 + '.' + str(last))
    return ip_list
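
# Usage sketch (illustrative values): every address from the start IP up to the
# given last octet, e.g.
#   decide_ip_somerange('10.0.0.5', 8)  ->  ['10.0.0.5', '10.0.0.6', '10.0.0.7', '10.0.0.8']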

'''
Parse the 200 response of the multipart-upload complete interface
@xmldata            XML string returned by the interface
@return_status      returned status code
@expected_bucket    expected bucket
@expected_key       expected object key
'''

def complete_xml_200(xmldata, return_status, expected_bucket, expected_key):
    success = '########################################\n                 SUCCESS\n########################################'
    fail = '########################################\n                 FAIL\n########################################'
    if xmldata != '':
        if return_status == '200':
            print u'>>>>正确返回的status:{0}'.format(return_status)
            rootxml = ElementTree.fromstring(xmldata)
            for bucket_data in rootxml.findall("{http://wcs.chinanetcenter.com/document}Bucket"):
                bucket_data_text = bucket_data.text
                if bucket_data != '' and bucket_data_text == expected_bucket:
                    print u'>>>>正确返回的bucket:{0}'.format(bucket_data_text)
                    rootxml = ElementTree.fromstring(xmldata)
                    for key_data in rootxml.findall("{http://wcs.chinanetcenter.com/document}Key"):
                        key_data_text = key_data.text
                        if key_data != '' and key_data_text == expected_key:
                            print u'>>>>正确返回的key:{0}'.format(key_data_text)
                            rootxml = ElementTree.fromstring(xmldata)
                            for ETag_data in rootxml.findall("{http://wcs.chinanetcenter.com/document}ETag"):
                                ETag_data_text = ETag_data.text
                                if ETag_data_text != '':
                                    print u'>>>>正确返回的ETag:{0}'.format(ETag_data_text)
                                    print success
                                    return 'SUCCESS'
                                else:
                                    print fail
                                    return 'FAIL'
                        else:
                            print u'>>>>返回的key与预期不符,正确的key:{0},错误的key:{1}'.format(expected_key, key_data_text)
                            print fail
                            return 'FAIL'
                else:
                    print u'>>>>返回的bucket与预期不符,正确的bucket:{0},错误的bucket:{1}'.format(expected_bucket, bucket_data_text)
                    print fail
                    return 'FAIL'
        else:
            print u'>>>>返回的status状态码与预期不一致,正确的status:200,错误的status:{0}'.format(return_status)
            print fail
            return 'FAIL'
    else:
        print u'>>>>获取到的xml内容为空!'
        print fail
        return 'FAIL'
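
# Example of the XML this checker expects (a hypothetical complete-multipart-upload
# response; bucket, key and ETag values are placeholders):
#   <CompleteMultipartUploadResult xmlns="http://wcs.chinanetcenter.com/document">
#     <Bucket>test-bucket</Bucket>
#     <Key>demo.jpg</Key>
#     <ETag>"0123456789abcdef"</ETag>
#   </CompleteMultipartUploadResult>
# With that payload, complete_xml_200(xml_text, '200', 'test-bucket', 'demo.jpg')
# would print the parsed fields and return 'SUCCESS'.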

'''
Write the returned data to an XML file so later responses can be compared against it
@filename  path of the file to write
@data      data to write
'''

def write_xml(filename, data):
    try:
        f = open(filename, 'wb')
        f.write(data)
        f.close()
        return 0
    except IOError:
        print '无法生成文件!'
        return -1

# Append the metadata of a multipart-upload task to an element list
def element_list_append_Upload(filename, uploadid, element=None):
    'Append the Upload, Key, StorageClass and UploadId entries of a multipart-upload task'
    if element is None:  # avoid the mutable-default-argument pitfall
        element = []
    element.append([2, 'Upload', None])
    element.append([3, 'Key', filename])
    element.append([3, 'StorageClass', 'STANDARD'])
    element.append([3, 'UploadId', uploadid])
    return element

'''
Compare two lists element by element and report the first mismatch.
'''

def comp_list(return_list, true_list):
    if len(return_list) == len(true_list):
        for i in range(0, len(return_list)):
            # matching elements are skipped
            if return_list[i] == true_list[i]:
                pass
            else:  # report the index of the first mismatching element
                print '第%d 个元素不一致,正确的元素 %s,返回错误的元素 %s' % (i + 1, true_list[i], return_list[i])
                return 'FAIL'
        return 'SUCCESS'
    else:
        print '返回结果内容长度与预期的不一致.\n接口返回列表:%s,\n预期返回结果:%s' % (return_list, true_list)
        return 'FAIL'

# Set parameters whose values change after certain preset request conditions
def set_otherelement(element_list=[], **kwargs):
    'Update KeyMarker, MaxUploads, NextKeyMarker, NextUploadIdMarker, Prefix, IsTruncated and Delimiter in the element list'
    index_map = {
        'Delimiter': 2,
        'IsTruncated': 6,
        'KeyMarker': 7,
        'MaxUploads': 8,
        'NextKeyMarker': 9,
        'NextUploadIdMarker': 10,
        'Prefix': 11,
    }
    for name, value in kwargs.items():
        if name in index_map:
            element_list[index_map[name]][2] = value

# Append UploadIdMarker at the end
def element_list_append_UploadIdMarker(element, UploadIdMarker=None):
    'Append the UploadIdMarker entry; defaults to None'
    element.append([2, 'UploadIdMarker', UploadIdMarker])

def writexml(managerList, filename):
    # Create an empty document in memory
    doc = xml.dom.minidom.Document()
    # Create the root node CompleteMultipartUpload
    root = doc.createElement('CompleteMultipartUpload')
    # (attributes could be set on the root node here if needed)
    # Attach the root node to the document
    doc.appendChild(root)
    for i in managerList:
        nodeManager = doc.createElement('Part')
        nodeName = doc.createElement('PartNumber')
        # Text node carrying the part number
        nodeName.appendChild(doc.createTextNode(str(i['PartNumber'])))
        nodeAge = doc.createElement("ETag")
        nodeAge.appendChild(doc.createTextNode(str(i["ETag"])))
        # Attach PartNumber and ETag to the Part node,
        # then attach the Part node to the root
        nodeManager.appendChild(nodeName)
        nodeManager.appendChild(nodeAge)
        root.appendChild(nodeManager)
    with open(filePath + filename, 'wb') as fp:
        doc.writexml(fp, indent='', addindent='', newl='', encoding="utf-8")
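
# Example (illustrative): writexml([{'PartNumber': 1, 'ETag': '"etag-1"'},
#                                   {'PartNumber': 2, 'ETag': '"etag-2"'}], 'complete.xml')
# writes a single-line document shaped like (indented here only for readability):
#   <?xml version="1.0" encoding="utf-8"?>
#   <CompleteMultipartUpload>
#     <Part><PartNumber>1</PartNumber><ETag>"etag-1"</ETag></Part>
#     <Part><PartNumber>2</PartNumber><ETag>"etag-2"</ETag></Part>
#   </CompleteMultipartUpload>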

def getdate():
    # GMT-formatted date required by the HTTP Date header
    GMT_FORMAT = '%a, %d %b %Y %H:%M:%S GMT'
    date = datetime.datetime.utcnow().strftime(GMT_FORMAT)
    return date

def getToolPath():
    # Absolute path of the token tool
    return os.path.abspath('.') + '/test_tool/wcs-token-swing-1.0.jar'

def getUnixTime():
    # 13-digit UNIX timestamp (milliseconds)
    current_milli_time = lambda: int(round(time.time() * 1000))
    print current_milli_time()
    return current_milli_time()

def getUnixTimeSecond():
    # 10-digit UNIX timestamp (seconds)
    current_milli_time = lambda: int(round(time.time()))
    print current_milli_time()
    return current_milli_time()

def currentTimeFormatHour():
    # Current local time formatted as %Y-%m-%d-%H (hour precision only)
    current_time_format = str(time.strftime('%Y-%m-%d-%H', time.localtime(time.time())))
    print ("生成年月日时格式:%s" % current_time_format)
    return current_time_format

def currentTimeFormatHourMin():
    # Current local time formatted as %Y-%m-%d-%H-%M with the minute floored to the nearest 5-minute boundary
    current_hour_format = str(time.strftime('%Y-%m-%d-%H', time.localtime(time.time())))
    current_min_format = str(time.strftime('%Y-%m-%d-%H-%M', time.localtime(time.time())))
    minStr = current_min_format.strip().split('-')[-1]
    print ("当前分钟数:%s" % minStr)
    minInt = int(minStr)
    if (minInt % 10) < 5:
        if minInt / 10 == 0:
            minStr = '00'
        else:
            minStr = str(minInt / 10) + '0'
    else:
        if minInt / 10 == 0:
            minStr = '05'
        else:
            minStr = str(minInt / 10) + '5'
    print ("生成年月日时分格式:%s" % (current_hour_format + '-' + minStr))
    return current_hour_format + '-' + minStr

def currentTimeFormatYearHourMin():
    # Build a %Y:%H:%M regular expression that matches the current 5-minute window
    currentVosHourTimeformat = str(time.strftime('%Y:%H', time.localtime(time.time())))
    currentVosHourMinformat = str(time.strftime('%Y:%H:%M', time.localtime(time.time())))
    minStr = currentVosHourMinformat.strip().split(':')[-1]
    print ("当前分钟数:%s" % minStr)
    minInt = int(minStr)
    min1 = str(minInt / 10)
    minInt2 = minInt % 10
    if minInt2 < 5:
        min2 = '[0-4]'
    else:
        min2 = '[5-9]'
    print ("生成年时分正则表达式:%s" % (currentVosHourTimeformat + ':[' + min1 + ']' + min2))
    return currentVosHourTimeformat + ':[' + min1 + ']' + min2

def get_isotime(days=0, hours=0, minutes=0):
    # ISO-8601 basic time (e.g. 20170810T120000Z) offset from now, used for V4 signing
    if isinstance(days, int) and isinstance(hours, int) and isinstance(minutes, int):
        Daybefore = (datetime.datetime.now() + datetime.timedelta(days=days, hours=hours, minutes=minutes))
        timeStamp = int(time.mktime(Daybefore.timetuple()))
        afterdate = datetime.datetime.utcfromtimestamp(timeStamp)
        amzdate = afterdate.strftime('%Y%m%dT%H%M%SZ')
        return amzdate
    else:
        return -1

def get_utctime():
    # UTC time in GMT format; V4 signing accepts both the ISO and the UTC format
    GMT_FORMAT = '%a, %d %b %Y %H:%M:%S GMT'
    return datetime.datetime.utcnow().strftime(GMT_FORMAT)

def utc2local():
    # Convert UTC time to local time (+08:00)
    now_stamp = time.time()
    local_time = datetime.datetime.fromtimestamp(now_stamp)
    utc_time = datetime.datetime.utcfromtimestamp(now_stamp)
    offset = local_time - utc_time
    local_st = datetime.datetime.utcnow() + offset
    GMT_FORMAT = '%Y%m%d%H%M%S'
    localTime = local_st.strftime(GMT_FORMAT)
    print u'获取到的当前时间是:\n', localTime
    return localTime

def getPutPostDetails(bucket, s3_url, queryStr):
    # Fetch the GET/PUT statistics of a bucket
    uri = '/' + bucket + '?' + queryStr
    url = s3_url + uri
    print u'请求的URL为:\n' + url
    rq = requests.get(url)
    print '状态码code:{0}\n响应头header:{1}\n响应消息体:{2}\n'.format(str(rq.status_code), rq.headers, rq.text)
    return rq

def print_node(node):
    # Print the attributes, tag and text of an XML node
    print "=============================================="
    print "node.attrib:%s" % node.attrib
    if "age" in node.attrib:
        print "node.attrib['age']:%s" % node.attrib['age']
    print "node.tag:%s" % node.tag
    print "node.text:%s" % node.text

def getAllXmlElemts(xml_data_tree):
    # Parse an XML tree into a dict; xml_data_tree is an element, e.g. ElementTree.fromstring(rqGet.text)
    xmlEmlents = {}
    print u'解析返回的xml信息:'
    if 'LifecycleConfiguration' in xml_data_tree.tag:
        for childelemt in xml_data_tree[0].getchildren():
            if 'Filter' in childelemt.tag:
                for childelemtFilter in childelemt.getchildren():
                    print_node(childelemtFilter)
                    xmlEmlents[childelemtFilter.tag] = childelemtFilter.text
            elif 'Expiration' in childelemt.tag:
                for childelemtExpiration in childelemt.getchildren():
                    print_node(childelemtExpiration)
                    xmlEmlents[childelemtExpiration.tag] = childelemtExpiration.text
            else:
                print_node(childelemt)
                xmlEmlents[childelemt.tag] = childelemt.text
    else:
        for childelemt in xml_data_tree.getchildren():
            print_node(childelemt)
            if 'time' in childelemt.attrib:
                print u'打印出每个节点的时间值:' + childelemt.attrib['time'] + '\n'
                # Convert the time format
                timeArray = time.strptime(childelemt.attrib['time'], '%Y-%m-%d %H:%M')
                otherStyleTime = time.strftime("%Y%m%d%H%M", timeArray)
                print u'格式化之后的时间值为:' + otherStyleTime + '\n'
                xmlEmlents[childelemt.tag + str(otherStyleTime)] = childelemt.text
            else:
                xmlEmlents[childelemt.tag] = childelemt.text
    return xmlEmlents

def bucket_lifecycle_op(region, method, bucket, body, date, ak, sk, s3_url, requestType=‘‘, host=‘‘, signType=‘‘):
    # 空间生命周期规则操作
    resource = ‘/‘ + bucket + ‘/?lifecycle‘
    print u‘resource信息:\n‘, resource
    contentType = ‘‘
    if body != ‘‘:
        print ‘完整body:\n‘, body
        contentMD5 = get_md5_value(body)
    else:
        contentMD5 = ‘‘
    # method = ‘PUT‘
    if signType == ‘‘:
        headers = generate_v2_heder_token(method, contentMD5, contentType, date, resource, ak, sk)
    if signType == ‘v4‘:
        dic = collections.OrderedDict()
        dic[‘lifecycle‘] = ‘‘
        request_parameters = urllib.urlencode(dic)
        if method == ‘PUT‘ and requestType == ‘bucketS3Url‘:
            headers = get_v4_header_token(region, method, bucket + ‘.‘ + host, bucket, ‘‘, request_parameters, date, ak,
                                          sk, body, ‘‘, ‘content-md5;host;x-amz-content-sha256;x-amz-date‘, contentMD5)
        elif method == ‘PUT‘:
            headers = get_v4_header_token(region, method, host, bucket, ‘‘, request_parameters, date, ak, sk, body, ‘‘,
                                          ‘content-md5;host;x-amz-content-sha256;x-amz-date‘, contentMD5)
        elif method != ‘PUT‘ and requestType == ‘bucketS3Url‘:
            headers = get_v4_header_token(region, method, bucket + ‘.‘ + host, bucket, ‘‘, request_parameters, date, ak,
                                          sk)
        else:
            headers = get_v4_header_token(region, method, host, bucket, ‘‘, request_parameters, date, ak, sk)
    if requestType == ‘bucketS3Url‘:
        s3url = ‘http://‘ + bucket + ‘.‘ + host + ‘/?lifecycle‘
    else:
        s3url = s3_url + resource
    print ‘s3请求url:\n‘, s3url
    if method == ‘PUT‘:
        rq = requests.put(url=s3url, data=body, headers=headers)
    elif method == ‘GET‘:
        rq = requests.get(url=s3url, headers=headers)
    else:
        rq = requests.delete(url=s3url, headers=headers)
    status = rq.status_code
    print ‘状态码code:{0}\n响应消息体:{1}\n响应头header:{2}\n‘.format(str(status), rq.text, rq.headers)
    return rq

def uploadAndStringbody(times, filepath, S3_ACCESS_KEY_ID, S3_SECRET_ACCESS_KEY, bucket, uploadUrl, prefix, filename):
    ##########上传文件##########
    body = ‘<Quiet>false</Quiet>‘
    # body = ‘<Quiet>true</Quiet>‘
    for i in range(1, times):
        print (‘上传的文件个数为:%s‘ % str(i))
        currentFile = filepath
        if filename == ‘‘:
            srcFileKey = prefix + str(i) + ‘.jpg‘
        else:
            srcFileKey = urllib.quote(filename)
        key_xml = ‘<Object><Key>‘ + srcFileKey + ‘</Key></Object>‘
        body = body + key_xml
        print u‘#####body组合#####\n‘, body
        uploadToken = generateUploadToken(S3_ACCESS_KEY_ID, S3_SECRET_ACCESS_KEY, bucket, srcFileKey, ‘1000000000‘,
                                          ‘1000000000‘, ‘1‘)
        rqUload = uploadFile(currentFile, uploadToken, uploadUrl)
        print u‘#####code#####\n‘, str(rqUload.status_code)
        # self.assertEqual(rqUload.status_code, 200)
    stringBody = ‘<Delete>‘ + body + ‘</Delete>‘
    print u‘#####完整stringBody#####\n‘, stringBody
    return stringBody
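
# The stringBody assembled above is a standard multi-object-delete payload; with
# times=3 and prefix='img' it would look like (line breaks added for readability):
#   <Delete><Quiet>false</Quiet>
#     <Object><Key>img1.jpg</Key></Object>
#     <Object><Key>img2.jpg</Key></Object>
#   </Delete>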

def generate_v2_heder_token(method, contentMD5, contentType, date, CanonicalizedResource, ak, sk,
                            CanonicalizedOBSHeaders=''):
    # Generate the V2 token: the string-to-sign is signed with HMAC-SHA1 using the
    # secret key, and the resulting digest is base64-encoded.
    headers = {}
    if CanonicalizedOBSHeaders == '':
        stringToSign = method + '\n' + contentMD5 + '\n' + contentType + '\n' + date + '\n' + CanonicalizedResource
    else:
        stringToSign = (method + '\n' + contentMD5 + '\n' + contentType + '\n' + date + '\n' +
                        CanonicalizedOBSHeaders + '\n' + CanonicalizedResource)
    print u'鉴权信息stringToSign: ', stringToSign
    my_sign = hmac.new(sk, stringToSign, sha1).digest()
    signature = base64.b64encode(my_sign)
    token = 'AWS' + ' ' + ak + ':' + signature
    print u'生成token信息:', token
    headers['Authorization'] = token
    if date != '':
        headers['date'] = date
    if contentMD5 != '':
        headers['content-md5'] = contentMD5
    if contentType != '':
        headers['content-type'] = contentType
    return headers
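
# Usage sketch (placeholder bucket, key and credentials): sign a simple GET with
# the V2 header token and issue the request.
#   date = getdate()
#   headers = generate_v2_heder_token('GET', '', '', date, '/test-bucket/demo.jpg', ak, sk)
#   rq = requests.get(s3_url + '/test-bucket/demo.jpg', headers=headers)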

def get_v4_header_token(region, method, host, bucketname, filename, request_parameters, amzdate, ak, sk, postbody=‘‘,
                        contentlength=‘‘, signed_headers=‘host;x-amz-content-sha256;x-amz-date‘, contentMd5=‘‘,
                        contentType=‘‘, xAmzCopySource=‘‘):
    # 生成V4的token
    datestamp = datetime.datetime.utcnow().strftime(‘%Y%m%d‘)
    region = region
    service = ‘s3‘
    if request_parameters == ‘‘ and filename == ‘‘:
        resource_uri = ‘/‘ + bucketname
    elif request_parameters == ‘‘:
        resource_uri = ‘/‘ + bucketname + ‘/‘ + filename
    elif ‘lifecycle‘ in request_parameters:
        resource_uri = ‘/‘ + bucketname + ‘/?‘ + request_parameters
    elif filename == ‘‘:
        resource_uri = ‘/‘ + bucketname + ‘?‘ + request_parameters
    else:
        resource_uri = ‘/‘ + bucketname + ‘/‘ + filename + ‘?‘ + request_parameters
    if bucketname in host:
        resource_uri = ‘/‘ + filename
    print ("请求的uri信息:%s" % resource_uri)
    algorithm = ‘AWS4-HMAC-SHA256‘
    Credential = ak
    credential_scope = datestamp + ‘/‘ + region + ‘/‘ + service + ‘/‘ + ‘aws4_request‘
    print ("第三个组合参数信息credential_scope:%s" % credential_scope)
    print ("signed_headers信息:%s" % signed_headers)
    if True:
        signing_key = getSignatureKey(sk, datestamp, region, service)
        if True:
            canonical_uri = resource_uri.split(‘?‘)[0]
            if request_parameters == ‘uploads‘:
                canonical_querystring = ‘uploads=‘
            else:
                canonical_querystring = request_parameters
            print ("canonical_querystring信息:%s" % canonical_querystring)
            payload_hash = hashlib.sha256(postbody).hexdigest()
            signed_headers_list = signed_headers.split(‘;‘)
            print ("分割之后的列表为:%s" % signed_headers_list)
            headers = {}
            for headerElemt in signed_headers_list:
                print ("获取的元素为:%s" % headerElemt)
                if headerElemt == ‘host‘:
                    headers[headerElemt] = host
                elif headerElemt == ‘x-amz-content-sha256‘:
                    headers[headerElemt] = payload_hash
                elif headerElemt == ‘x-amz-date‘:
                    headers[headerElemt] = amzdate
                elif headerElemt == ‘date‘:
                    headers[headerElemt] = amzdate
                elif headerElemt == ‘content-md5‘:
                    headers[headerElemt] = contentMd5
                elif headerElemt == ‘content-type‘:
                    headers[headerElemt] = contentType
                elif headerElemt == ‘x-amz-copy-source‘:
                    headers[headerElemt] = xAmzCopySource
                elif headerElemt == ‘content-length‘:
                    headers[‘content-length‘] = contentlength
                else:
                    headers[headerElemt] = ‘‘
            headers_str = ‘‘
            for k, v in sorted(headers.items()):
                headers_str += k + ":" + v + "\n"
            canonical_headers = headers_str
            if postbody == ‘‘:
                print ("无body的canonical_headers: %s" % canonical_headers)
            else:
                print ("请求带有body的canonical_headers: %s" % canonical_headers)
            canonical_request = method + ‘\n‘ + canonical_uri + ‘\n‘ + canonical_querystring + ‘\n‘ + canonical_headers + ‘\n‘ + signed_headers + ‘\n‘ + payload_hash
            print ("canonical_request信息: %s" % canonical_request)
        string_to_sign = algorithm + ‘\n‘ + amzdate + ‘\n‘ + credential_scope + ‘\n‘ + hashlib.sha256(
            canonical_request).hexdigest()
        signature = hmac.new(signing_key, (string_to_sign).encode(‘utf-8‘), hashlib.sha256).hexdigest()
        print ("signature信息: %s" % signature)
    authorization_header = algorithm + ‘ ‘ + ‘Credential=‘ + ak + ‘/‘ + credential_scope + ‘, ‘ + ‘SignedHeaders=‘ + signed_headers + ‘, ‘ + ‘Signature=‘ + signature
    print ("最后生成的token信息: %s" % authorization_header)
    headers[‘Authorization‘] = authorization_header
    return headers
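
# getSignatureKey() is called by the V4 helpers in this file but is not defined in
# this section. As a reference, a minimal sketch following the standard AWS
# Signature V4 key-derivation chain (this is an assumption about what the
# project's own helper does):
def _sign_hmac256(key, msg):
    # one HMAC-SHA256 step of the V4 signing-key derivation
    return hmac.new(key, msg.encode('utf-8'), hashlib.sha256).digest()

def getSignatureKey(key, dateStamp, regionName, serviceName):
    kDate = _sign_hmac256('AWS4' + key, dateStamp)
    kRegion = _sign_hmac256(kDate, regionName)
    kService = _sign_hmac256(kRegion, serviceName)
    kSigning = _sign_hmac256(kService, 'aws4_request')
    return kSigning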

# 获取v4的token
def get_v4_header_token2(method, host, hosturl, bucketname, filename, request_parameters, amzdate, ak, sk,
                         postbody=‘‘, signed_headers_dict=‘‘):
    ‘‘‘
    :param method:请求方法,如post
    :param host:请求的host。不带htpp,有端口的话,到端口结束
    :param hosturl: 请求的hosturl,不带子参数信息的,到端口结束
    :param bucketname:空间名称
    :param filename:文件名称,可带文件夹
    :param request_parameters:请求的资源参数信息(含子资源)
    :param amzdate:utc格式的时间
    :param ak:用户的ak
    :param sk:用户的sk
    :param postbody:请求上传的data数据(已读到缓存的信息)
    :return:
    ‘‘‘
    # -----固定变量
    datestamp = datetime.datetime.utcnow().strftime(‘%Y%m%d‘)
    # datestamp = ‘20170810‘
    region = ‘region98‘
    service = ‘s3‘
    request_parametersurlencode = request_parameters_urlencode(request_parameters)  # urlencode编码后的参数
    print ‘编码后的uri:{0}‘.format(request_parametersurlencode)
    # -------构造请求地址url地址信息
    if request_parameters == ‘‘ and filename == ‘‘ and bucketname != ‘‘:
        resource_uri = ‘/‘ + bucketname
    elif request_parameters == ‘‘ and filename != ‘‘ and bucketname != ‘‘:
        resource_uri = ‘/‘ + bucketname + ‘/‘ + filename
    elif filename == ‘‘ and request_parameters != ‘‘ and bucketname != ‘‘:
        resource_uri = ‘/‘ + bucketname + ‘?‘ + request_parametersurlencode
    elif request_parameters != ‘‘ and bucketname == ‘‘ and filename != ‘‘:
        resource_uri = ‘/‘ + filename + ‘?‘ + request_parametersurlencode
    elif request_parameters == ‘‘ and bucketname == ‘‘ and filename != ‘‘:
        resource_uri = ‘/‘ + filename
    elif request_parameters == ‘‘ and bucketname == ‘‘ and filename == ‘‘:
        resource_uri = ‘‘
    else:
        resource_uri = ‘/‘ + bucketname + ‘/‘ + filename + ‘?‘ + request_parametersurlencode
    print ‘请求的uri信息:‘, resource_uri
    # request_url = hosturl+resource_uri #最后请求的url地址信息
    # print ‘请求的s3_v4 的地址信息:‘,request_url

    # -------构造token信息-----------------------------------------
    ##--第一个组合参数:固定参数类型
    algorithm = ‘AWS4-HMAC-SHA256‘
    ##--第二个组合参数:用户ak信息
    Credential = ak

    ##--第三个组合参数:credential_scope:datestamp+ ‘/‘ + region + ‘/‘ + service + ‘/‘ + ‘aws4_request‘
    credential_scope = datestamp + ‘/‘ + region + ‘/‘ + service + ‘/‘ + ‘aws4_request‘
    print ‘第三个组合参数信息:credential_scopev‘, credential_scope

    ##--第四个组合参数:SignedHeaders
    def get_signed_headers(list):
        key_list = sorted(list.keys())
        return ‘;‘.join(key_list)

    if signed_headers_dict == ‘‘:
        signed_headers = ‘host;x-amz-content-sha256;x-amz-date‘
    else:
        print ‘读取到的signed_headers_dict不为空,值为‘, signed_headers_dict
        signed_headers = get_signed_headers(signed_headers_dict)
    print ‘signed_headers信息:‘, signed_headers

    ##--第五个组合参数:signature:hmac.new(signing_key, (string_to_sign).encode(‘utf-8‘), hashlib.sha256).hexdigest()
    ###--内嵌构造组合1-----
    if True:
        signing_key = getSignatureKey(sk, datestamp, region, service)
        # print ‘signing_key信息:‘
        ###--内嵌构造组合2-----
        if True:
            # ---子参数canonical_uri
            canonical_uri = resource_uri.split(‘?‘)[0]
            print ‘canonical_uri 信息:‘, canonical_uri
            # ---子参数canonical_querystring
            canonical_querystring = request_parametersurlencode
            print ‘canonical_querystring 信息:‘, canonical_querystring
            # --子参数canonical_headers
            payload_hash = hashlib.sha256(postbody).hexdigest()
            if signed_headers_dict == ‘‘:
                headers = {‘host‘: host, ‘x-amz-content-sha256‘: payload_hash, ‘x-amz-date‘: amzdate}
                headers_str = ‘‘
                for k, v in sorted(headers.items()):
                    headers_str += k + ":" + v + "\n"
                canonical_headers = headers_str
                print ‘无body的canonical_headers:‘, canonical_headers
            else:
                headers = signed_headers_dict
                headers_str = ‘‘
                for k, v in sorted(headers.items()):
                    headers_str += k + ":" + v + "\n"
                canonical_headers = headers_str
                print ‘请求带有body的canonical_headers:‘, canonical_headers
            # --子参数signed_headers

            # --子资源payload_hash
            # payload_hash
            canonical_request = method + ‘\n‘ + canonical_uri + ‘\n‘ + canonical_querystring + ‘\n‘ + canonical_headers + ‘\n‘ + signed_headers + ‘\n‘ + payload_hash
            print ‘canonical_request信息:‘, canonical_request
        string_to_sign = algorithm + ‘\n‘ + amzdate + ‘\n‘ + credential_scope + ‘\n‘ + hashlib.sha256(
            canonical_request).hexdigest()
        signature = hmac.new(signing_key, (string_to_sign).encode(‘utf-8‘), hashlib.sha256).hexdigest()
        print ‘signature信息:‘, signature
    authorization_header = algorithm + ‘ ‘ + ‘Credential=‘ + ak + ‘/‘ + credential_scope + ‘, ‘ + ‘SignedHeaders=‘ + signed_headers + ‘, ‘ + ‘Signature=‘ + signature
    print ‘最后生成的token信息:‘, authorization_header
    headers[‘Authorization‘] = authorization_header  # 直接返回头部
    return headers

def s3FileOp(method, fileKey, ak, sk, bucket, date, s3_url, currentFile=‘‘):
    # 使用s3 put 上传文件
    print u‘请求操作为:\n‘, method
    contentMD5 = ‘‘
    if method != ‘POST‘:
        resource = ‘/{0}/{1}‘.format(bucket, fileKey)
        print u‘resource信息:\n‘, resource
        contentType = ‘‘
        s3url = s3_url + resource
        print ‘请求的s3 url地址:\n‘, s3url
    if method != ‘POST‘:
        headers = generate_v2_heder_token(method, contentMD5, contentType, date, resource, ak, sk)
    if method == ‘HEAD‘:
        rq = requests.head(url=s3url, headers=headers)
    elif method == ‘GET‘:
        rq = requests.get(s3url, headers=headers)
    elif method == ‘PUT‘:
        with open(currentFile, ‘rb‘) as f:
            # 读取本地文件
            d = f.read()
            rq = requests.put(s3url, data=d, headers=headers)
    else:
        form_list = collections.OrderedDict()  # 有序的字典,为后面form 表单的file放在最后
        resource = ‘/{0}‘.format(bucket)
        print u‘resource信息:\n‘, resource
        s3url = s3_url + resource
        print ‘请求的s3 url地址:\n‘, s3url
        with open(currentFile, ‘rb‘) as f:
            d = f.read()
            ###########form 表单数据######################
            form_list[‘key‘] = fileKey
            form_list[‘file‘] = (fileKey, str(d))
            print u‘文件上传,KEY:‘, fileKey
            m = MultipartEncoder(form_list)
            data = m.to_string()  # 待上传的数据
            contentType = m.content_type  # 获取表单的content-type值
            print u‘获取到的content-type:\n‘, contentType
            headers = generate_v2_heder_token(method, contentMD5, contentType, date, resource, ak, sk)
            rq = requests.post(s3url, data=data, headers=headers)
    status = rq.status_code
    if method != ‘GET‘:
        print ‘状态码code:{0}\n响应消息体:{1}\n响应头header:{2}\n‘.format(str(status), rq.text, rq.headers)
    else:
        print ‘状态码code:{0}\n响应头header:{1}\n‘.format(str(status), rq.headers)
    return rq

def copyFile(region, desBucket, desFileKey, srcBucket, srcFileKey, s3_url, host, ak, sk):
    # copy 文件
    # copy文件 生成目标文件文件
    resource = ‘/{0}/{1}‘.format(desBucket, desFileKey)
    print u‘resource信息:‘, resource
    xamzcopysource = ‘/{0}/{1}‘.format(srcBucket, srcFileKey)
    print ‘>>>>xamzcopysource信息:‘, xamzcopysource
    # 生成token
    method = ‘PUT‘
    amzdate = get_isotime()
    s3url = s3_url + resource
    print ‘s3请求url:\n‘, s3url
    headers = get_v4_header_token(region, method, host, desBucket, desFileKey, ‘‘, amzdate, ak, sk, ‘‘, ‘‘,
                                  ‘host;x-amz-content-sha256;x-amz-copy-source;x-amz-date‘, ‘‘, ‘‘, xamzcopysource)
    print ‘请求头headers:\n‘, headers
    rq = requests.put(s3url, headers=headers)
    status = str(rq.status_code)
    print ‘状态码:{0}\n返回结果:{1}\n响应消息头:{2}\n‘.format(status, rq.text, rq.headers)
    return rq

def getFileUrlSignature(region, methon, host, bucket, fileKey, amzdate, ak, sk, s3_url, requestType=‘‘, rqwithout=‘‘):
    # 获取文件,使用url带鉴权的方式
    print u‘使用url带鉴权下载:\n‘
    headers = {}
    if rqwithout == ‘X-Amz-Algorithm-invalid‘:
        algorithm = ‘AWS4-HMACSHA256‘
    else:
        algorithm = ‘AWS4-HMAC-SHA256‘
    request_parameters = get_urltoken_uri(region, ak, amzdate, algorithm)
    if requestType == ‘bucketUrl‘:
        resource = ‘/‘ + fileKey
        uri = get_v4_url_token(region, methon, bucket + ‘.‘ + host, ‘‘, fileKey, request_parameters, amzdate, ak, sk,
                               algorithm, rqwithout)
        geturl = ‘http://‘ + bucket + ‘.‘ + host + resource + ‘?‘ + uri
    elif requestType == ‘‘:
        resource = ‘/‘ + bucket + ‘/‘ + fileKey
        uri = get_v4_url_token(region, methon, host, bucket, fileKey, request_parameters, amzdate, ak, sk, algorithm,
                               rqwithout)
        geturl = s3_url + resource + ‘?‘ + uri
    else:
        resource = ‘/‘ + bucket + ‘/‘ + fileKey
        uri = get_v4_url_token(region, methon, host, bucket, fileKey, request_parameters, amzdate, ak, sk, algorithm,
                               rqwithout)
        geturl = s3_url + resource
    print u"请求url:", geturl
    headers[‘x-amz-date‘] = amzdate
    rq = requests.get(geturl, headers=headers)
    status = rq.status_code
    print ‘状态码code:{0}\n返回的文件内容长度:{1}\n响应头header:{2}\n‘.format(status, len(rq.content), rq.headers)
    return rq

def get_urltoken_uri(regionname, ak, amzdate, algorithm='AWS4-HMAC-SHA256', signed_headers_dict=''):
    # Build the request_parameters query string needed for V4 pre-signed (temporary) authentication
    datestamp = datetime.datetime.utcnow().strftime('%Y%m%d')
    # datestamp = '20170810'
    region = regionname
    service = 's3'
    algorithm = algorithm
    X_Amz_Expires = get_outtime_second(minutes=15)

    ##-- fourth component: SignedHeaders
    def get_signed_headers(list):
        key_list = sorted(list.keys())
        return ';'.join(key_list)

    if signed_headers_dict == '':
        signed_headers = 'host'
    else:
        print '读取到的signed_headers_dict不为空,值为', signed_headers_dict
        signed_headers = get_signed_headers(signed_headers_dict)
    print 'signed_headers信息:', signed_headers

    credential_scope = datestamp + '/' + region + '/' + service + '/' + 'aws4_request'
    request_parameters = ('X-Amz-Algorithm=' + algorithm +
                          '&X-Amz-Credential=' + ak + '/' + credential_scope +
                          '&X-Amz-Date=' + amzdate +
                          '&X-Amz-Expires=' + str(X_Amz_Expires) +
                          '&X-Amz-SignedHeaders=' + signed_headers)
    return request_parameters

def request_parameters_urlencode(request_parameters):
    # 资源参数的urlencode编码
    tmp_uploads = False
    urlencode_dict = collections.OrderedDict()
    if request_parameters == ‘‘:
        return ‘‘
    else:
        request_parameters_list = request_parameters.split(‘&‘)
        for i in request_parameters_list:
            if i == ‘uploads‘:
                tmp_uploads = True
            else:
                tmp_list = i.split("=")
                urlencode_dict[tmp_list[0]] = tmp_list[1]
        if tmp_uploads:
            urlencode_dict[‘uploads‘] = ‘‘
        return urllib.urlencode(urlencode_dict)
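
# Worked example (illustrative): the bare 'uploads' sub-resource is re-appended
# last so it keeps a trailing '=' in the canonical query string:
#   request_parameters_urlencode('prefix=test&uploads')  ->  'prefix=test&uploads='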

def get_outtime_second(days=0, hours=0, minutes=0, second=0):
    # Compute an expiry period in seconds; the maximum allowed is 7 days
    if isinstance(days, int) and isinstance(hours, int) and isinstance(minutes, int) and isinstance(second, int):
        days_to_second = days * 24 * 60 * 60
        hours_to_second = hours * 60 * 60
        minutes_to_second = minutes * 60
        total_second = days_to_second + hours_to_second + minutes_to_second + second
        return total_second
    else:
        print '请输入正确的时分秒数据'
        return

def get_v4_url_token(regionname, method, host, bucketname, filename, request_parameters, amzdate, ak, sk,
                     algorithm=‘AWS4-HMAC-SHA256‘, rqwithout=‘‘, postbody=‘‘, signed_headers_dict=‘‘):
    # 获取v4临时的的token
    # -----固定变量
    datestamp = datetime.datetime.utcnow().strftime(‘%Y%m%d‘)
    # datestamp = ‘20170810‘
    region = regionname  # ‘region_82‘
    service = ‘s3‘
    request_parametersurlencode = request_parameters_urlencode(request_parameters)  # urlencode编码后的参数
    print ‘编码后的uri:{0}‘.format(request_parametersurlencode)
    # -------构造请求地址url地址信息
    if request_parameters == ‘‘ and filename == ‘‘ and bucketname != ‘‘:
        resource_uri = ‘/‘ + bucketname
    elif request_parameters == ‘‘ and filename != ‘‘ and bucketname != ‘‘:
        resource_uri = ‘/‘ + bucketname + ‘/‘ + filename
    elif filename == ‘‘ and request_parameters != ‘‘ and bucketname != ‘‘:
        resource_uri = ‘/‘ + bucketname + ‘?‘ + request_parametersurlencode
    elif request_parameters != ‘‘ and bucketname == ‘‘ and filename != ‘‘:
        resource_uri = ‘/‘ + filename + ‘?‘ + request_parametersurlencode
    elif request_parameters == ‘‘ and bucketname == ‘‘ and filename != ‘‘:
        resource_uri = ‘/‘ + filename
    elif request_parameters == ‘‘ and bucketname == ‘‘ and filename == ‘‘:
        resource_uri = ‘‘
    else:
        resource_uri = ‘/‘ + bucketname + ‘/‘ + filename + ‘?‘ + request_parametersurlencode
    print ‘请求的uri信息:‘, resource_uri
    # -------构造token信息-----------------------------------------
    ##--第一个组合参数:固定参数类型
    algorithm = algorithm
    ##--第二个组合参数:用户ak信息
    Credential = ak
    X_Amz_Expires = get_outtime_second(minutes=15)
    ##--第三个组合参数:credential_scope:datestamp+ ‘/‘ + region + ‘/‘ + service + ‘/‘ + ‘aws4_request‘
    credential_scope_uncode = datestamp + ‘/‘ + region + ‘/‘ + service + ‘/‘ + ‘aws4_request‘
    print ‘第三个组合参数信息:credential_scopev‘, credential_scope_uncode
    credential_scope = urllib2.quote(credential_scope_uncode, safe=‘‘)

    ##--第四个组合参数:SignedHeaders
    def get_signed_headers(list):
        key_list = sorted(list.keys())
        return ‘;‘.join(key_list)

    if signed_headers_dict == ‘‘:
        signed_headers = ‘host‘
    else:
        print ‘读取到的signed_headers_dict不为空,值为‘, signed_headers_dict
        signed_headers = get_signed_headers(signed_headers_dict)
    print ‘signed_headers信息:‘, signed_headers

    ###--内嵌构造组合1-----
    if True:
        signing_key = getSignatureKey(sk, datestamp, region, service)
        ###--内嵌构造组合2-----
        if True:
            # ---子参数canonical_uri
            canonical_uri = resource_uri.split(‘?‘)[0]
            print ‘canonical_uri 信息:‘, canonical_uri
            # ---子参数canonical_querystring
            canonical_querystring = request_parametersurlencode
            print ‘canonical_querystring 信息:‘, canonical_querystring
            # --子参数canonical_headers
            payload_hash = ‘UNSIGNED-PAYLOAD‘
            if signed_headers_dict == ‘‘:
                headers = {‘host‘: host}
                headers_str = ‘‘
                for k, v in sorted(headers.items()):
                    headers_str += k + ":" + v + "\n"
                canonical_headers = headers_str
                print ‘无body的canonical_headers:‘, canonical_headers
            else:
                headers = signed_headers_dict
                headers_str = ‘‘
                for k, v in sorted(headers.items()):
                    headers_str += k + ":" + v + "\n"
                canonical_headers = headers_str
                print ‘请求带有body的canonical_headers:‘, canonical_headers
            # --子参数signed_headers

            # --子资源payload_hash
            # payload_hash
            canonical_request = method + ‘\n‘ + canonical_uri + ‘\n‘ + canonical_querystring + ‘\n‘ + canonical_headers + ‘\n‘ + signed_headers + ‘\n‘ + payload_hash
            print ‘canonical_request信息:‘, canonical_request
        string_to_sign = algorithm + ‘\n‘ + amzdate + ‘\n‘ + credential_scope_uncode + ‘\n‘ + hashlib.sha256(
            canonical_request).hexdigest()
        print ‘string_to_sign:‘, string_to_sign
        signature = hmac.new(signing_key, (string_to_sign).encode(‘utf-8‘), hashlib.sha256).hexdigest()
        print ‘signature信息:‘, signature
    # Assemble the pre-signed query string; rqwithout omits or corrupts a single
    # parameter so the negative test cases can exercise the server-side checks.
    pairs = collections.OrderedDict([
        ('X-Amz-Algorithm', algorithm),
        ('X-Amz-Credential', ak + '/' + credential_scope),
        ('X-Amz-Date', amzdate),
        ('X-Amz-Expires', str(X_Amz_Expires)),
        ('X-Amz-SignedHeaders', signed_headers),
        ('X-Amz-Signature', signature),
    ])
    if rqwithout == 'X-Amz-Expires':
        # keep the parameter but use an out-of-range value
        pairs['X-Amz-Expires'] = '88888888'
    elif rqwithout in ('X-Amz-Algorithm', 'X-Amz-Date', 'X-Amz-Credential'):
        # drop the parameter completely
        del pairs[rqwithout]
    elif rqwithout in ('X-Amz-Algorithm=', 'X-Amz-Credential=', 'X-Amz-Expires=',
                       'X-Amz-SignedHeaders=', 'X-Amz-Signature='):
        # keep the parameter name but send an empty value
        pairs[rqwithout[:-1]] = ''
    authorization_url = '&'.join(k + '=' + v for k, v in pairs.items())
    print '最后生成的token信息:', authorization_url
    return authorization_url

def get_getUsage_token_header(accessId, secretKey, timestamp, type):
    # 获取存储量的token
    print u‘获取存储量的token:\n‘
    print "accessId:", accessId
    print "secretKey:", secretKey
    print "timestamp:", timestamp
    print "type:", type
    stringToSign = "POST" + "/wcs_api/" + type + timestamp
    print "stringToSign:", stringToSign
    my_sign = hmac.new(secretKey, stringToSign, sha1).digest()
    token = base64.b64encode(my_sign)
    print "signature,", token
    headers = {}
    headers[‘accessId‘] = accessId
    headers[‘Content-Type‘] = ‘application/json;charset=utf-8‘
    headers[‘timestamp‘] = timestamp
    headers[‘signature‘] = token
    print ‘headers:‘, headers
    return headers

'''
Collect information about the chunk files of a multipart upload
@filepath  directory holding the chunk files
'''

def getfileinfo(filepath):
    from os.path import getsize, join
    # Return a dict of {chunk file name: size} plus the total size
    file_dict = {}
    total_size = 0
    tmp_num = 0
    try:
        for root, dirs, files in os.walk(filepath):
            print files
            if not files:
                print '没有文件'
            else:
                for i in files:
                    file_0 = getsize(join(root, i))
                    file_dict[i] = file_0
                    total_size += file_0
                    tmp_num += 1
        return file_dict, total_size
    except OSError as e:
        print '文件地址找不到文件!'
        print e

def analysis_xml(file_name):
    def walkData(root_node, level, result_list, *args):
        temp_list = [level, root_node.tag.replace('{http://wcs.chinanetcenter.com/document}', ''), root_node.text]
        result_list.append(temp_list)
        # Walk every child node
        children_node = root_node.getchildren()
        # Stop when there are no children
        if len(children_node) == 0:
            return
        for child in children_node:
            walkData(child, level + 1, result_list)

    level = 1  # node depth starts at 1
    result_list = []  # parsed data together with the node level
    # Get the root node
    root = ET.parse(file_name).getroot()
    walkData(root, level, result_list)
    # Drop the time-related tags (filter instead of removing while iterating,
    # which would skip elements)
    result_list = [partinfo for partinfo in result_list
                   if partinfo[1] not in ('LastModified', 'Initiated')]
    return result_list


# Get the date a number of days from now
def get_afterdate(days=1):
    'Return the date `days` days after today, formatted as %Y-%m-%d %H:%M:%S (the time part is always 00:00:00)'
    today = datetime.date.today()  # today's date
    afterdate = today + datetime.timedelta(days=days)
    aftertime = afterdate.strftime("%Y-%m-%d %H:%M:%S")
    return aftertime

# Generate a 13-digit (millisecond) timestamp
def datetime_timestamp(dt):
    # dt is a string such as "2012-03-28 06:53:40"; it is parsed into a time
    # tuple and then converted to a timestamp in milliseconds
    s = time.mktime(time.strptime(dt, '%Y-%m-%d %H:%M:%S'))
    return int(s) * 1000

def ByteToHex(bins):
    """
    Convert a byte string to its uppercase hex representation, e.g. for output.
    """
    return binascii.hexlify(bins).upper()

def get_content_sha256(file):
    # Compute the x-amz-content-sha256 value of a file
    md = hashlib.sha256()
    with open(file, 'rb') as f:
        d = f.read()
        md.update(d)
        sha256_hash = md.digest()
        # sha256_hash = md.hexdigest()
    print u'文件的sha256 hash值为:\n', ByteToHex(sha256_hash)
    return ByteToHex(sha256_hash)

def getSHA256TreeHash(file):
    # Compute the SHA-256 tree hash for the given file
    chunkSHA256Hashes = getChunkSHA256Hashes(file)
    print u'文件的树形sha256 hash值为:\n', ByteToHex(computeSHA256TreeHash(chunkSHA256Hashes))
    return ByteToHex(computeSHA256TreeHash(chunkSHA256Hashes))

def getChunkSHA256Hashes(file):
    # Compute a SHA-256 checksum for each 1 MB chunk of the input file,
    # including the last chunk even if it is smaller than 1 MB
    md = hashlib.sha256()
    fileSize = getFileSize(file)
    numChunks = fileSize / ONE_MB
    if fileSize % ONE_MB > 0:
        numChunks += 1
    print u'总共块数:\n', numChunks
    if numChunks == 0:
        return md.digest()
    f = open(file, 'rb')
    idx = 0
    chunkSHA256Hashes = [[]] * numChunks
    while True:
        chunk_data = f.read(ONE_MB)
        if not chunk_data:
            break
        else:
            newmd = hashlib.sha256()
            newmd.update(chunk_data)
            chunkSHA256Hashes[idx] = newmd.digest()
            idx += 1
    f.close()
    return chunkSHA256Hashes
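
# getFileSize() is used above but not defined in this section; a minimal sketch,
# assuming it simply wraps os.path.getsize:
def getFileSize(path):
    # size of the file in bytes
    return os.path.getsize(path)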

def computeSHA256TreeHash(chunkSHA256Hashes):
    # Computes the SHA-256 tree hash for the passed array of 1 MB chunk checksums.
    md = hashlib.sha256()
    prevLvlHashes = chunkSHA256Hashes
    while len(prevLvlHashes) > 1:
        length = len(prevLvlHashes) / 2
        if len(prevLvlHashes) % 2 != 0:
            length += 1
        j = 0
        i = 0
        currLvlHashes = [[]] * length
        while i < len(prevLvlHashes):
            # If there are at least two elements remaining
            if len(prevLvlHashes) - i > 1:
                # Calculate a digest of the concatenated nodes
                newmd = hashlib.sha256()
                newmd.update(prevLvlHashes[i])
                newmd.update(prevLvlHashes[i + 1])
                currLvlHashes[j] = newmd.digest()
            else:
                # Take care of remaining odd chunk
                currLvlHashes[j] = prevLvlHashes[i]
            i = i + 2
            j = j + 1
        prevLvlHashes = currLvlHashes
    return prevLvlHashes[0]
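
# Consistency note (illustrative self-check): for a non-empty file of at most 1 MB
# there is a single chunk, so the tree hash equals the plain SHA-256 of the file.
#   tree_hash = getSHA256TreeHash(small_file)
#   assert tree_hash == ByteToHex(hashlib.sha256(open(small_file, 'rb').read()).digest())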

def uploadFileWithDatas_303upload(filePath, url, data):
    # @函数目的:上传文件
    # @参数说明:filePath:本地上传文件路径
    # @参数说明:url:上传URL
    # @参数说明:data:请求表单
    # @返回值:上传应答
    print (‘本地上传文件路径为:%s‘ % filePath)
    files = {‘file‘: open(filePath, ‘rb‘)}
    print (‘上传URL为:%s‘ % url)
    resp = requests.post(url, data=data, files=files, allow_redirects=False)
    print (‘上传请求响应码:%s‘ % resp.status_code)
    print (‘上传响应头部:%s‘ % resp.headers)
    print (‘上传响应内容:%s‘ % resp.text)
    return resp

def analysis_vos_log_rest(logpath, filekey):
    # 解析VOS日志
    #     print u‘请求的命令是:‘ + ‘ssh [email protected]‘ + serverip + ‘ -p ‘ + serverport + ‘ \‘cat ‘ + logpath + ‘ | grep \‘‘ + filekey + ‘ | tail -n 1‘
    #     vosLog = subprocess.Popen(‘ssh [email protected]‘ + serverip + ‘ -p ‘ + serverport + ‘ \‘cat ‘ + logpath + ‘ | grep \‘‘ + filekey + ‘ | tail -n 1‘, stdout=subprocess.PIPE, shell=True)
    print u‘请求的命令是:‘ + ‘cat ‘ + logpath + ‘ | grep ‘ + filekey + ‘ | tail -n 1‘
    vosLog = subprocess.Popen(‘cat ‘ + logpath + ‘ | grep ‘ + filekey + ‘ | tail -n 1‘, stdout=subprocess.PIPE,
                              shell=True)
    vosLog.wait()
    vosLogOut = vosLog.stdout.readlines()
    print type(vosLogOut)
    for str in vosLogOut:
        print u‘符合条件的VOS日志:‘ + str
    if vosLogOut:
        last_line = vosLogOut[-1].strip()
        print u‘最后一行的VOS日志:‘ + last_line
    vosLogList = last_line.split()
    vosLogMap = collections.OrderedDict()
    # 字段说明:请求空间
    vosLogMap[‘bucket‘] = vosLogList[5]
    # 字段说明:文件名
    vosLogMap[‘filename‘] = vosLogList[6]
    # 字段说明:响应状态
    vosLogMap[‘status_code‘] = vosLogList[7]
    # 字段说明:客户端ip
    vosLogMap[‘client_ip‘] = vosLogList[9]
    # 字段说明:服务器ip
    vosLogMap[‘server_ip‘] = vosLogList[23]
    # 字段说明:用户端(请求端)ip
    vosLogMap[‘cdn_src_ip‘] = vosLogList[29]
    # 字段说明:Cdn回源
    vosLogMap[‘xFromCDN‘] = vosLogList[31]
    print u‘对应的VOS日志各字段值:‘
    for key, value in vosLogMap.items():
        print u‘vos相应字段: ‘ + key + ‘    vos相应字段的值: ‘ + value
    return vosLogMap

def analysis_vos_log(serverip, serverport, logpath, filekey):
    # 解析VOS日志
    print u‘请求的命令是:‘ + ‘ssh [email protected]‘ + serverip + ‘ -p ‘ + serverport + ‘ \‘cat ‘ + logpath + ‘ | grep \‘‘ + filekey
    vosLog = subprocess.Popen(
        ‘ssh [email protected]‘ + serverip + ‘ -p ‘ + serverport + ‘ \‘cat ‘ + logpath + ‘ | grep \‘‘ + filekey,
        stdout=subprocess.PIPE, shell=True)
    vosLog.wait()
    vosLogOut = vosLog.stdout.readlines()
    print type(vosLogOut)
    for str in vosLogOut:
        print u‘符合条件的VOS日志:‘ + str
    if vosLogOut:
        last_line = vosLogOut[-1].strip()
        print u‘最后一行的VOS日志:‘ + last_line
    vosLogList = last_line.split()
    vosLogMap = collections.OrderedDict()
    # 字段说明:请求空间
    vosLogMap[‘bucket‘] = vosLogList[5]
    # 字段说明:文件名
    vosLogMap[‘filename‘] = vosLogList[6]
    # 字段说明:响应状态
    vosLogMap[‘status_code‘] = vosLogList[7]
    # 字段说明:客户端ip
    vosLogMap[‘client_ip‘] = vosLogList[9]
    # 字段说明:UA(该字段可能会有空格,所以后续字段的位置会有所变动)
    vosLogMap[‘user_agent‘] = vosLogList[11]
    # 字段说明:原始数据大小
    vosLogMap[‘data_size‘] = vosLogList[12]
    # 字段说明:响应的body大小
    vosLogMap[‘reply_size‘] = vosLogList[14]
    # 字段说明:回复的总大小,包括响应头
    vosLogMap[‘reply_total‘] = vosLogList[15]
    # 字段说明:Cdn回源
    vosLogMap[‘xFromCDN‘] = vosLogList[31]
    print u‘对应的VOS日志各字段值:‘
    for key, value in vosLogMap.items():
        print u‘vos相应字段: ‘ + key + ‘    vos相应字段的值: ‘ + value
    return vosLogMap

def analysis_vos_traffic(serverip, serverport, logpath, filekey):
    # 解析VOS日志
    traffic = 0
    print u‘请求的命令是:‘ + ‘ssh [email protected]‘ + serverip + ‘ -p ‘ + serverport + ‘ \‘cat ‘ + logpath + ‘ | grep \‘‘ + filekey
    vosLog = subprocess.Popen(
        ‘ssh [email protected]‘ + serverip + ‘ -p ‘ + serverport + ‘ \‘cat ‘ + logpath + ‘ | grep \‘‘ + filekey,
        stdout=subprocess.PIPE, shell=True)
    vosLog.wait()
    vosLogOut = vosLog.stdout.readlines()
    print type(vosLogOut)
    for i in range(len(vosLogOut)):
        print u‘符合条件的VOS日志:‘ + vosLogOut[i]
        vosLogMap = collections.OrderedDict()
        vosLogMap[‘data_size‘] = vosLogOut[i].strip().split()[12]
        traffic = traffic + int(re.sub("\"", "", vosLogMap[‘data_size‘]))
    return len(vosLogOut), traffic

def analysis_vos_log_online(serverip, serverport, logpath, filekey):
    # 解析VOS日志
    print u‘请求的命令是:‘ + ‘ssh [email protected]‘ + serverip + ‘ -p ‘ + serverport + ‘ \‘cat ‘ + logpath + ‘ | grep \‘‘ + filekey + ‘ | tail -n 1‘
    vosLog = subprocess.Popen(
        ‘ssh [email protected]‘ + serverip + ‘ -p ‘ + serverport + ‘ \‘cat ‘ + logpath + ‘ | grep \‘‘ + filekey + ‘ | tail -n 1‘,
        stdout=subprocess.PIPE, shell=True)
    vosLog.wait()
    vosLogOut = vosLog.stdout.readlines()
    print type(vosLogOut)
    for str in vosLogOut:
        print u‘符合条件的VOS日志:‘ + str
    if vosLogOut:
        last_line = vosLogOut[-1].strip()
        print u‘最后一行的VOS日志:‘ + last_line
    vosLogList = last_line.split()
    vosLogMap = collections.OrderedDict()
    # 字段说明:请求空间
    vosLogMap[‘bucket‘] = vosLogList[5]
    # 字段说明:文件名
    vosLogMap[‘filename‘] = vosLogList[6]
    # 字段说明:响应状态
    vosLogMap[‘status_code‘] = vosLogList[7]
    # 字段说明:客户端ip
    vosLogMap[‘client_ip‘] = vosLogList[9]
    # 字段说明:sdk版本(该字段可能会有空格,所以后续字段的位置会有所变动)
    vosLogMap[‘sdk_version‘] = vosLogList[10]
    # 字段说明:UA(该字段可能会有空格,所以后续字段的位置会有所变动)
    vosLogMap[‘user_agent‘] = vosLogList[11] + ‘ ‘ + vosLogList[12] + ‘ ‘ + vosLogList[13] + ‘ ‘ + vosLogList[
        14] + ‘ ‘ + vosLogList[15] + ‘ ‘ + vosLogList[16] + ‘ ‘ + vosLogList[17]
    # 字段说明:响应的body大小
    vosLogMap[‘reply_size‘] = vosLogList[20]
    # 字段说明:回复的总大小,包括响应头
    vosLogMap[‘reply_total‘] = vosLogList[21]
    # 字段说明:Cdn回源
    vosLogMap[‘xFromCDN‘] = vosLogList[37]
    print u‘对应的VOS日志各字段值:‘
    for key, value in vosLogMap.items():
        print u‘vos相应字段: ‘ + key + ‘    vos相应字段的值: ‘ + value
    return vosLogMap

# 生成文件库token
def generate_token_pt(urlpath, datastring, ak, sk):
    stringToSign = urlpath + ‘\n‘ + datastring
    print u‘鉴权信息stringToSign: ‘, ‘\n‘ + stringToSign + ‘\n‘
    my_sign = hmac.new(sk, stringToSign, sha1).hexdigest()
    signature = base64.b64encode(my_sign)
    print ‘signature: ‘, signature
    token = ak + ‘:‘ + signature
    print ‘token: ‘, token
    return token

# oppo 专用鉴权信息
‘‘‘
@urlpath fetch请求的参数
@RequestParams 请求数据内容,带入为字典
@ak,sk  系统的aksk 信息
‘‘‘

def generate_token_oppo(urlpath, RequestParams, ak, sk, ):
    postdata = JSONEncoder().encode(RequestParams)  # 将字典转成json 格式,不然接口解析不了
    print u‘原字典转成json格式:‘, postdata
    stringToSign = urlpath + ‘\n‘ + postdata
    print u‘鉴权信息stringToSign: ‘, ‘\n‘ + stringToSign + ‘\n‘
    my_sign = hmac.new(sk, stringToSign, sha1).hexdigest()
    signature = base64.b64encode(my_sign)
    token = ak + ‘:‘ + signature
    return token, postdata

‘‘‘
创建文件库
 @vaultname:文件库名称,4-32位,名称由字母(小写字母)、数字以及-(减号)组成;不以数字和-(减号)开头,且-(减号)不能作为结尾
 @ak,sk  系统的aksk 信息
 @url,region的管理域名或rest url
 requesturl: http://10.8.198.32:98/vaults/vaultid
‘‘‘

def create_vault(vaultName, mgr_url, ak, sk, date):
    header = {}
    urlpath = ‘/vaults/‘ + vaultName
    datastring = ‘‘
    token = generate_token_pt(urlpath, datastring, ak, sk)
    header[‘Authorization‘] = token
    header[‘Date‘] = date
    url = mgr_url + urlpath
    print u‘请求的url信息:‘, url
    rq = requests.put(url=url, headers=header, data=datastring)
    print ‘状态码code:{0}\n响应头header:{1}\n响应消息体:{2}\n‘.format(str(rq.status_code), rq.headers, rq.text)
    return rq

‘‘‘
删除文件库
 @vaultname:文件库名称,4-32位,名称由字母(小写字母)、数字以及-(减号)组成;不以数字和-(减号)开头,且-(减号)不能作为结尾
 @ak,sk  系统的aksk 信息
 @url,region的管理域名或rest url
 requesturl: http://10.8.198.32:98/vaults/vaultid
‘‘‘

def archivestorage_delete_vault(uri, host, url, ak, sk, date):
    # uri = ‘/vaults/‘+vaultid
    print u‘****开始删除文件库!****‘
    requesturl = url + uri
    datastring = ‘‘
    header = vault_header(uri, host, datastring, ak, sk, date)
    print u‘>>>>请求的url信息:‘, requesturl
    rq = requests.delete(url=requesturl, headers=header)
    # 校验请求返回的响应状态码,打印返回头部内容
    # result = Response_status(rq,200)
    return rq

def vault_header(uri, host, datastring, ak, sk, date):
    header = {}
    token = generate_token_pt(uri, datastring, ak, sk)
    header[‘Authorization‘] = token
    header[‘Date‘] = date
    header[‘Host‘] = host
    print u‘>>>>请求header:‘, header
    return header

‘‘‘
删除档案
 @vaultname:文件库名称,4-32位,名称由字母(小写字母)、数字以及-(减号)组成;不以数字和-(减号)开头,且-(减号)不能作为结尾
 @ak,sk  系统的aksk 信息
 @url,region的管理域名或rest url
 requesturl: http://10.8.198.32:98/vaults/vaultid/archives/archiveid
‘‘‘

def delete_vaults_archives(uri, host, url, ak, sk, date):
    datastring = ‘‘
    mgr_token = generate_token_pt(uri, datastring, ak, sk)  # 通过python脚本而生成的mgr_token
    header = {}
    header[‘Authorization‘] = mgr_token
    header[‘Date‘] = date
    header[‘Host‘] = host
    print u‘>>>>请求header:\n‘, header
    print u‘****************开始删除档案!****************‘
    requesturl = url + uri
    print u‘>>>>请求的url信息:‘, requesturl
    rq = requests.delete(url=requesturl, headers=header)
    showRes(rq)
    return rq

# oppo fetch接口
‘‘‘
@urlpath fetch请求的参数
@RequestParams 请求数据内容,带入为字典
@ak,sk  系统的aksk 信息
‘‘‘

def customfmgr_fetch(bucket, session_id, fops, notifyUrl, urlpath, ak, sk, getFopsUrl):
    data = {}
    callback = base64Encode(notifyUrl)
    data["session_id"] = session_id
    data["fops"] = fops
    data["callback"] = callback
    data["bucketName"] = bucket
    print ‘原始的data字典内容:‘, data
    print ‘-------------------------------------生成token信息-----------------------‘
    token, data_string = generate_token_oppo(urlpath, data, ak, sk)
    print ‘生成的token信息:‘, token
    headers = {'Authorization': token, 'Expect': '100-continue'}
    print ‘请求上传数据格式内容:‘, data_string
    rqFops = requests.post(getFopsUrl, data=data_string, headers=headers)
    print ‘状态码code:{0}\n响应头header:{1}\n响应消息体:{2}\n‘.format(str(rqFops.status_code), rqFops.headers, rqFops.text)
    return rqFops

def getUsage(data, accessId, secretKey, timestamp, type, url):
    # 获取存储量
    print u‘请求的数据值:\n‘, data
    print u‘请求的URL:\n‘, url
    tokenHeaders = get_getUsage_token_header(accessId, secretKey, timestamp, type)
    resp = requests.post(url, data=data, headers=tokenHeaders)
    print ‘状态码code:{0}\n响应头header:{1}\n响应消息体:{2}\n‘.format(str(resp.status_code), resp.headers, resp.text)
    return resp

def get_panada_token_header(stringToSign, expire=‘‘):
    # 生成panada的鉴权头部
    print u‘生成panada的鉴权头部:\n‘
    print "stringToSign:", stringToSign
    my_sign = hmac.new(‘you_are_awesome‘ + expire, stringToSign, sha1).digest()
    token = base64.b64encode(my_sign)
    print "token值:", token
    headers = {}
    headers[‘Access-Token‘] = token
    print ‘headers:‘, headers
    return headers

def panada_ops(queryStr, opType, bucket, panadaListUrl, savePath=‘‘, expire=‘‘):
    # 熊猫的公共请求方法
    print u‘熊猫的公共请求方法:\n‘
    stringToSign = ‘/wslive/‘ + opType + ‘/‘ + bucket + queryStr
    tokenHeaders = get_panada_token_header(stringToSign, expire)
    url = panadaListUrl + bucket + queryStr
    print ‘请求的URL为:‘, url
    rq = requests.get(url, headers=tokenHeaders)
    print ‘状态码code:{0}\n响应头header:{1}\n响应消息体:{2}\n‘.format(str(rq.status_code), rq.headers, rq.text)
    if savePath != ‘‘:
        with open(savePath, "wb") as code:
            code.write(rq.content)  # 按字节写入,避免中文等非ASCII文本写入二进制文件时报错
        print (‘文件成功保存到本地:%s‘ % savePath)
    return rq

def panada_old_ops(queryStr, opType, bucket, panadaSeverUrl, savePath=‘‘, expire=‘‘):
    # 熊猫的公共请求方法
    print u‘熊猫的原list接口,不需要鉴权:\n‘
    url = panadaSeverUrl + bucket + queryStr
    print ‘请求的URL为:‘, url
    rq = requests.get(url)
    print ‘状态码code:{0}\n响应头header:{1}\n响应消息体:{2}\n‘.format(str(rq.status_code), rq.headers, rq.text)
    if savePath != ‘‘:
        with open(savePath, "wb") as code:
            code.write(rq.content)  # 按字节写入,避免中文等非ASCII文本写入二进制文件时报错
        print (‘文件成功保存到本地:%s‘ % savePath)
    return rq

def panadaCutSteamSearch(panadaStreamSearchUrl, searchSuffix):
    # 熊猫的公共请求方法
    print u‘剪辑查询的查询串:‘ + searchSuffix
    print u‘剪辑查询的请求URL:‘ + panadaStreamSearchUrl
    rqStatus = requests.get(panadaStreamSearchUrl + searchSuffix)
    print ‘状态码code:{0}\n响应头header:{1}\n响应消息体:{2}\n‘.format(str(rqStatus.status_code), rqStatus.headers, rqStatus.text)
    return rqStatus

def fopsOp(bucket, ak, sk, fops, opType, mgrUrl, notifyUrl, channelname, livestreamPullAdd=‘‘, pull_stream=‘‘,
           logPath=‘/tmp/pull_stream.log‘, batchNotifyID=‘‘, deadline=‘‘):
    # fops与wslive操作
    if livestreamPullAdd != ‘‘:
        base64_str_livestreamPullAdd = base64Encode(livestreamPullAdd)
    else:
        base64_str_channelname = base64Encode(channelname)
        base64_str_bucket = base64Encode(bucket)
    base64_str_fops = base64Encode(fops)
    base64_str_notifyUrl = base64SafeUrlEncode(notifyUrl)
    if livestreamPullAdd != ‘‘:
        if batchNotifyID != ‘‘:
            fops_body = "livestreamPullAdd=" + base64_str_livestreamPullAdd + "&ops=" + base64_str_fops + "&force=1&notifyURL=" + base64_str_notifyUrl
        else:
            fops_body = "livestreamPullAdd=" + base64_str_livestreamPullAdd + "&ops=" + base64_str_fops + "&force=1&notifyURL=" + base64_str_notifyUrl + "&batchNotifyID=wsrecord-testid&batchNotifyInterval=100&batchNotifyCount=20&pullStreamTimeout=20"
    else:
        fops_body = ‘bucket=‘ + base64_str_bucket + ‘&channelname=‘ + base64_str_channelname + ‘&fops=‘ + base64_str_fops + ‘&notifyURL=‘ + base64_str_notifyUrl + ‘&force=1‘
    if deadline != ‘‘:
        fops_body = fops_body + ‘&deadline=‘ + deadline
    fopToken = generateMgrFopsToken(ak, sk, opType, fops_body)
    getFopsUrl = getMgrUrl(mgrUrl, ‘/‘ + opType, ‘‘)
    if livestreamPullAdd != ‘‘:
        # 推流
        curlCommad(‘nohup sh -ex  ‘ + pull_stream + ‘  >‘ + logPath + ‘ 2>&1  &‘)
    # 发请求
    rqFops = mgrOperationWithDatas(getFopsUrl, fopToken, fops_body)
    status = rqFops.status_code
    print ‘状态码code:{0}\n响应消息体:{1}\n响应头header:{2}\n‘.format(str(status), rqFops.text, rqFops.headers)
    return rqFops

def getSignatureKey(key, dateStamp, regionName, serviceName):
    # 派生AWS V4签名密钥(HMAC-SHA256逐级签名)
    def sign(key1, msg1):
        return hmac.new(key1, msg1.encode(‘utf-8‘), hashlib.sha256).digest()

    kDate = sign((‘AWS4‘ + key).encode(‘utf-8‘), dateStamp)
    kRegion = sign(kDate, regionName)
    kService = sign(kRegion, serviceName)
    kSigning = sign(kService, ‘aws4_request‘)
    return kSigning
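
# 使用示例(示意):演示如何用getSignatureKey派生AWS V4签名密钥,
# 其中的sk、日期、region、服务名均为假设的示例值,实际以各环境配置为准
def demo_getSignatureKey():
    example_sk = 'exampleSecretKey'  # 假设的SK
    date_stamp = '20190901'          # 签名日期,格式YYYYMMDD
    signing_key = getSignatureKey(example_sk, date_stamp, 'cn-east-1', 's3')
    print '派生出的签名密钥(hex):', binascii.hexlify(signing_key)
    return signing_key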

def get_md5_value(src):
    # 获取字符串的MD5值
    myMd5 = hashlib.md5()
    myMd5.update(src)
    myMd5_Digest = base64.b64encode(myMd5.digest())
    print ("计算出来的MD5值为:%s" % myMd5_Digest)
    return myMd5_Digest

def get_md5_value_32(src):
    # 获取字符串的MD5值
    myMd5 = hashlib.md5()
    myMd5.update(src)
    myMd5_Digest = myMd5.hexdigest()
    print ("计算出来的MD5值为:%s" % myMd5_Digest)
    return myMd5_Digest
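
# 使用示例(示意):对同一个字符串分别计算base64形式和32位hex形式的MD5,示例字符串为假设值
def demo_md5():
    sample = 'hello-wcs'
    print 'base64形式的MD5:', get_md5_value(sample)
    print '32位hex形式的MD5:', get_md5_value_32(sample)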

def generateToken(resource, contentType, contentMD5, method, date, ak, sk):
    # token的算法是HMAC-SHA1,签名结果再进行一次base64编码
    # HMAC运算利用哈希算法,以一个密钥和一个消息为输入,生成一个消息摘要作为输出
    stringToSign = method + ‘\n‘ + contentMD5 + ‘\n‘ + contentType + ‘\n‘ + date + ‘\n‘ + resource
    print ‘stringToSign: ‘, stringToSign
    my_sign = hmac.new(sk, stringToSign, sha1).digest()
    signature = base64.b64encode(my_sign)
    token = ‘AWS‘ + ‘ ‘ + ak + ‘:‘ + signature
    print ‘token信息:‘, token
    return token

‘‘‘
鉴权信息组合,生成最后头部需要的token 信息
@method,请求的方式,post或是get等
@contentMD5,文件的md5值,需要经过base64加密后的
@contentType
@date ,时间日期,格式为GMT
@CanonicalizedResource,接口子资源信息
@sk,用户Sk
@ak,用户AK
@CanonicalizedOBSHeaders=‘‘,额外的参数,如x-amz-date,默认为空
‘‘‘

def generate_token(method, contentMD5, contentType, date, CanonicalizedResource, ak, sk, CanonicalizedOBSHeaders=''):
    # token的算法是HMAC-SHA1,签名结果再进行一次base64编码
    # HMAC运算利用哈希算法,以一个密钥和一个消息为输入,生成一个消息摘要作为输出
    if CanonicalizedOBSHeaders == '':
        stringToSign = method + '\n' + contentMD5 + '\n' + contentType + '\n' + date + '\n' + CanonicalizedResource
    else:
        stringToSign = method + '\n' + contentMD5 + '\n' + contentType + '\n' + date + '\n' + CanonicalizedOBSHeaders + '\n' + CanonicalizedResource
    print u'鉴权信息stringToSign: ', stringToSign
    my_sign = hmac.new(sk, stringToSign, sha1).digest()
    signature = base64.b64encode(my_sign)
    token = 'AWS' + ' ' + ak + ':' + signature
    print u'生成token信息:', token
    return token
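
# 使用示例(示意):用generate_token为一次GET请求组装Authorization头,
# 其中的resource、AK/SK均为假设的示例值,date需为GMT格式
def demo_generate_token_header():
    date = datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
    resource = '/examplebucket/examplekey'  # 假设的资源路径
    token = generate_token('GET', '', '', date, resource, 'exampleAK', 'exampleSK')
    headers = {'Authorization': token, 'Date': date}
    print '组装出的请求头:', headers
    return headers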

def generateUploadToken(ak, sk, bucketName, fileKey, fsizeLimit, exprieSeconds, overwrite, extend=‘‘):
    # @函数目的:生成上传凭证
    # @参数说明:ak,sk: 用户的AK/SK bucketName:空间名 fileKey:上传文件名 fsizeLimit:文件大小限制 expriSeconds:token时效 overwrite:是否覆盖
    # @返回值:上传token值
    akStr = ‘AK=‘ + ak
    skStr = ‘SK=‘ + sk
    bucketStr = ‘bucket=‘ + bucketName
    fileKeyStr = ‘fileKey=‘ + fileKey
    fsizelimitStr = ‘fsizeLimit=‘ + fsizeLimit
    exprieSecondsStr = ‘expireSeconds=‘ + exprieSeconds
    overwriteStr = ‘overwrite=‘ + overwrite
    if extend == ‘‘:
        excuteCmd = ‘ ‘.join(
            [‘java -jar‘, getToolPath(), ‘-ut‘, akStr, skStr, bucketStr, fileKeyStr, fsizelimitStr, exprieSecondsStr,
             overwriteStr])
    else:
        excuteCmd = ‘ ‘.join(
            [‘java -jar‘, getToolPath(), ‘-ut‘, akStr, skStr, bucketStr, fileKeyStr, fsizelimitStr, exprieSecondsStr,
             overwriteStr, extend])
    print ("token生成命令:%s" % excuteCmd)
    uploadToken = os.popen(excuteCmd).read()
    print ("上传凭证信息:%s" % uploadToken)
    return uploadToken.strip()

def generateUploadTokenWithoutKey(ak, sk, bucketName, fsizeLimit, exprieSeconds, overwrite):
    # @函数目的:生成上传凭证
    # @参数说明:ak,sk: 用户的AK/SK bucketName:空间名  fsizeLimit:文件大小限制 expriSeconds:token时效 overwrite:是否覆盖
    # @返回值:上传token值
    akStr = ‘AK=‘ + ak
    skStr = ‘SK=‘ + sk
    bucketStr = ‘bucket=‘ + bucketName
    fsizelimitStr = ‘fsizeLimit=‘ + fsizeLimit
    exprieSecondsStr = ‘expireSeconds=‘ + exprieSeconds
    overwriteStr = ‘overwrite=‘ + overwrite
    excuteCmd = ‘ ‘.join(
        [‘java -jar‘, getToolPath(), ‘-ut‘,
         akStr, skStr, bucketStr, fsizelimitStr, exprieSecondsStr, overwriteStr]
    )
    print ("token生成命令:%s" % excuteCmd)
    uploadToken = os.popen(excuteCmd).read()
    print ("上传凭证信息:%s" % uploadToken)
    return uploadToken.strip()

def generateMgrDeleteStatToken(ak, sk, bucketName, fileKey, opType):
    # @函数目的:生成删除,stat管理凭证
    # @参数说明:ak,sk: 用户的AK/SK bucketName:空间名 fileKey:文件名
    # @返回值:管理token值
    akStr = ‘AK=‘ + ak
    skStr = ‘SK=‘ + sk
    base64_str_file = base64SafeUrlEncode(bucketName + ‘:‘ + fileKey)
    urlStr = ‘url=/‘ + opType + ‘/‘ + base64_str_file
    excuteCmd = ‘ ‘.join([‘java -jar‘, getToolPath(), ‘-mt‘, akStr, skStr, urlStr])
    print ("token生成命令:%s" % excuteCmd)
    mgrTokenStr = os.popen(excuteCmd).read()
    mgrToken = mgrTokenStr.strip().split(‘\n‘)[-1]
    print ("管理凭证信息:%s" % mgrToken)
    return mgrToken.strip()

def generateMgrGetFileHashToken(ak, sk, bucketName, opType):
    # @函数目的:生成获取文件hash等bucket级查询操作的管理凭证
    # @参数说明:ak,sk: 用户的AK/SK bucketName:空间名 opType:操作类型
    # @返回值:管理token值
    akStr = ‘AK=‘ + ak
    skStr = ‘SK=‘ + sk
    base64_str_file = base64Encode(bucketName)
    urlStr = ‘url=/‘ + opType + ‘/‘ + base64_str_file
    excuteCmd = ‘ ‘.join([‘java -jar‘, getToolPath(), ‘-mt‘, akStr, skStr, urlStr])
    print ("token生成命令:%s" % excuteCmd)
    mgrTokenStr = os.popen(excuteCmd).read()
    mgrToken = mgrTokenStr.strip().split(‘\n‘)[-1]
    print ("管理凭证信息:%s" % mgrToken)
    return mgrToken.strip()

def generateMgrListToken(ak, sk, opType, detailStr):
    # @函数目的:生成list等查询类操作的管理凭证
    # @参数说明:ak,sk: 用户的AK/SK opType:操作类型 detailStr:查询详细参数串
    # @返回值:管理token值
    akStr = ‘AK=‘ + ak
    skStr = ‘SK=‘ + sk
    urlStr = ‘url=‘ + "‘/" + opType + detailStr + "‘"
    excuteCmd = ‘ ‘.join([‘java -jar‘, getToolPath(), ‘-mt‘, akStr, skStr, urlStr])
    print ("token生成命令:%s" % excuteCmd)
    mgrTokenStr = os.popen(excuteCmd).read()
    mgrToken = mgrTokenStr.strip().split(‘\n‘)[-1]
    print ("管理凭证信息:%s" % mgrToken)
    return mgrToken.strip()

def generateMgrFopsToken(ak, sk, opType, body):
    # @函数目的:生成fops等带请求body的操作的管理凭证
    # @参数说明:ak,sk: 用户的AK/SK opType:操作类型 body:请求消息体
    # @返回值:管理token值
    akStr = ‘AK=‘ + ak
    skStr = ‘SK=‘ + sk
    urlStr = ‘url=‘ + "‘/" + opType + "‘"
    bodyStr = ‘body=‘ + "‘" + body + "‘"
    excuteCmd = ‘ ‘.join([‘java -jar‘, getToolPath(), ‘-mt‘, akStr, skStr, urlStr, bodyStr])
    print ("token生成命令:%s" % excuteCmd)
    mgrTokenStr = os.popen(excuteCmd).read()
    mgrToken = mgrTokenStr.strip().split(‘\n‘)[-1]
    print ("管理凭证信息:%s" % mgrToken)
    return mgrToken.strip()

def getMgrUrl(mgrUrl, opType, detailStr):
    # @函数目的:生成管理凭证的请求URL
    # @参数说明:mgrUrl管理URL
    # @参数说明:opType操作类型
    # @参数说明:detailStr操作详细值
    # @返回值:请求URL
    requestUrl = mgrUrl + opType + detailStr
    print ("请求URL为:%s" % requestUrl)
    return requestUrl

def uploadFile(filePath, uploadToken, url):
    # @函数目的:上传文件
    # @参数说明:filePath:本地上传文件路径
    # @参数说明:uploadToken:上传凭证
    # @参数说明:url:上传URL
    # @返回值:上传应答
    data = {‘token‘: uploadToken}
    print (‘本地上传文件路径为:%s‘ % filePath)
    files = {‘file‘: open(filePath, ‘rb‘)}
    print (‘上传URL为:%s‘ % url)
    resp = requests.post(url, data=data, files=files)
    print (‘上传请求响应码:%s‘ % resp.status_code)
    print (‘上传响应头部:%s‘ % resp.headers)
    print (‘上传响应内容:%s‘ % resp.text)
    return resp

def completeUploadFile(bucket, ak, sk, fileKey, currentFile, wcs_base_upload_url):
    # 完整地上传一个文件
    uploadToken = generateUploadToken(ak, sk, bucket, fileKey, ‘1000000000‘, ‘1000000000‘, ‘1‘)
    rq = uploadFile(currentFile, uploadToken, wcs_base_upload_url)
    return rq
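
# 使用示例(示意):完整上传一个本地文件;bucket、AK/SK、上传域名均为假设值,
# 且依赖getToolPath()指向的java签名工具,实际路径以各环境为准
def demo_completeUploadFile():
    rq = completeUploadFile('example-bucket', 'exampleAK', 'exampleSK',
                            'demo_upload.txt', '/tmp/demo_upload.txt',
                            'http://example-upload-domain/file/upload')
    return rq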

def completeOps(bucket, ak, sk, mgrUrl, fileKey, trancodeKey, fops, notifyUrl):
    # 发视频操作
    getFopsUrl = getMgrUrl(mgrUrl, ‘/fops‘, ‘‘)
    base64_str_bucket = base64Encode(bucket)
    base64_str_filekey = base64SafeUrlEncode(fileKey)
    # base64_saveas = base64Encode(bucket + ‘:‘ + trancodeKey)
    base64_saveas = base64SafeUrlEncode(bucket + ‘:‘ + trancodeKey)
    base64_str_fops = base64Encode(fops + ‘|saveas/‘ + base64_saveas)
    base64_str_notifyUrl = base64Encode(notifyUrl)
    fops_body = ‘bucket=‘ + base64_str_bucket + ‘&key=‘ + base64_str_filekey + ‘&fops=‘ + base64_str_fops + ‘&notifyURL=‘ + base64_str_notifyUrl + ‘&force=1‘
    fopToken = generateMgrFopsToken(ak, sk, ‘fops‘, fops_body)
    rqFops = mgrOperationWithDatas(getFopsUrl, fopToken, fops_body)
    return rqFops

def completeOpsWithBody(ak, sk, mgrUrl, fops_body):
    # 发视频操作,参数是AK/SK,管理域名,封装好的请求fops_body部分
    getFopsUrl = getMgrUrl(mgrUrl, ‘/fops‘, ‘‘)
    fopToken = generateMgrFopsToken(ak, sk, ‘fops‘, fops_body)
    rqFops = mgrOperationWithDatas(getFopsUrl, fopToken, fops_body)
    return rqFops
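
# 使用示例(示意):按照completeOps中的拼装方式先构造fops_body,再调用completeOpsWithBody;
# 其中bucket、文件名、转码指令(avthumb/mp4仅作示例)、回调地址、管理域名均为假设值
def demo_completeOpsWithBody():
    bucket = 'example-bucket'
    base64_str_bucket = base64Encode(bucket)
    base64_str_filekey = base64SafeUrlEncode('demo.mp4')
    base64_saveas = base64SafeUrlEncode(bucket + ':demo_transcoded.mp4')
    base64_str_fops = base64Encode('avthumb/mp4|saveas/' + base64_saveas)
    base64_str_notifyUrl = base64Encode('http://example-notify/callback')
    fops_body = 'bucket=' + base64_str_bucket + '&key=' + base64_str_filekey
    fops_body += '&fops=' + base64_str_fops + '&notifyURL=' + base64_str_notifyUrl + '&force=1'
    return completeOpsWithBody('exampleAK', 'exampleSK', 'http://example-mgr-domain', fops_body)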

def fmgr_ops(ak, sk, mgr_url, fmgr_op, fops_body):
    # fmgr请求操作
    print u‘fmgr请求:\n‘
    getFopsUrl = getMgrUrl(mgr_url, ‘/fmgr/‘ + fmgr_op, ‘‘)
    fopToken = generateMgrFopsToken(ak, sk, ‘fmgr/‘ + fmgr_op, fops_body)
    rqFops = mgrOperationWithDatas(getFopsUrl, fopToken, fops_body)
    return rqFops

def uploadFileWithDatas(filePath, url, data):
    # @函数目的:上传文件
    # @参数说明:filePath:本地上传文件路径
    # @参数说明:url:上传URL
    # @参数说明:data:请求表单
    # @返回值:上传应答
    print (‘本地上传文件路径为:%s‘ % filePath)
    files = {‘file‘: open(filePath, ‘rb‘)}
    print (‘上传URL为:%s‘ % url)
    resp = requests.post(url, data=data, files=files)
    print (‘上传请求响应码:%s‘ % resp.status_code)
    print (‘上传响应头部:%s‘ % resp.headers)
    print (‘上传响应内容:%s‘ % resp.text)
    return resp

def appendFile(filePath, uploadToken, url, position):
    # @函数目的:追加文件
    # @参数说明:filePath:本地上传文件路径
    # @参数说明:uploadToken:上传凭证
    # @参数说明:url:追加URL
    # @返回值:追加应答
    data = {‘token‘: uploadToken}
    files = {‘file‘: open(filePath, ‘rb‘)}
    appendUrl = url + str(position)
    print (‘追加URL为:%s‘ % appendUrl)
    resp = requests.post(appendUrl, data=data, files=files)
    print (‘上传请求响应码:%s‘ % resp.status_code)
    print (‘上传响应头部:%s‘ % resp.headers)
    print (‘上传响应内容:%s‘ % resp.text)
    return resp

def appendFileWithDatas(filePath, url, position, data):
    # @函数目的:追加文件,指定加专门的表单参数
    # @参数说明:filePath:本地上传文件路径
    # @参数说明:url:追加URL
    # @参数说明:position:追加position
    # @参数说明:data:参数表单
    # @返回值:追加应答
    files = {‘file‘: open(filePath, ‘rb‘)}
    appendUrl = url + str(position)
    print (‘追加URL为:%s‘ % appendUrl)
    resp = requests.post(appendUrl, data=data, files=files)
    print (‘上传请求响应码:%s‘ % resp.status_code)
    print (‘上传响应头部:%s‘ % resp.headers)
    print (‘上传响应内容:%s‘ % resp.text)
    return resp

def appendFileWithHeaders(filePath, uploadToken, url, position, headers):
    # @函数目的:追加文件
    # @参数说明:filePath:本地上传文件路径
    # @参数说明:uploadToken:上传凭证
    # @参数说明:url:追加URL
    # @参数说明:position:追加position
    # @参数说明:headers:请求头部headers
    # @返回值:追加应答
    data = {‘token‘: uploadToken}
    files = {‘file‘: open(filePath, ‘rb‘)}
    appendUrl = url + str(position)
    print (‘追加URL为:%s‘ % appendUrl)
    print (‘请求头部为:%s‘ % headers)
    resp = requests.post(appendUrl, data=data, files=files, headers=headers)
    print (‘上传请求响应码:%s‘ % resp.status_code)
    print (‘上传响应头部:%s‘ % resp.headers)
    print (‘上传响应内容:%s‘ % resp.text)
    return resp

def curlCommad(excuteCmd, isReturn=‘‘):
    # @函数目的:使用curl命令来执行
    # @参数说明:excuteCmd:执行脚本
    # @返回值:返回curl执行结果
    print ("执行命令:%s" % excuteCmd)
    if isReturn == ‘‘:
        excuteResult = os.popen(excuteCmd).read()
        print ("执行结果:\n%s" % excuteResult)
        return excuteResult.strip().split(‘\n‘)
    else:
        os.popen(excuteCmd).read()

def curlCommadWithoutAnayle(excuteCmd):
    # @函数目的:使用curl命令来执行
    # @参数说明:excuteCmd:执行脚本
    # @返回值:返回curl执行结果
    print ("执行命令:%s" % excuteCmd)
    excuteResult = os.popen(excuteCmd).read()
    print ("执行结果:\n%s" % excuteResult)
    return excuteResult.strip()

def mgrOperation(url, mgrToken):
    # @函数目的:MGR操作
    # @参数说明:mgrToken:管理凭证
    # @参数说明:url:各种类型的mgrURL
    # @返回值:请求应答
    headers = {‘Authorization‘: mgrToken}
    print (‘MGR操作的URL为:%s‘ % url)
    resp = requests.post(url, headers=headers)
    print (‘MGR请求响应码:%s‘ % resp.status_code)
    print (‘MGR响应头部:%s‘ % resp.headers)
    print (‘MGR响应内容:%s‘ % resp.text)
    return resp

def mgrOperationWithDatas(url, mgrToken, data):
    # @函数目的:MGR操作
    # @参数说明:mgrToken:管理凭证
    # @参数说明:url:各种类型的mgrURL
    # @参数说明:data:请求参数列表
    # @返回值:请求应答
    headers = {‘Authorization‘: mgrToken}
    print (‘MGR操作的URL为:%s‘ % url)
    resp = requests.post(url, data=data, headers=headers)
    print (‘MGR请求响应码:%s‘ % resp.status_code)
    print (‘MGR响应头部:%s‘ % resp.headers)
    print (‘MGR响应内容:%s‘ % resp.text)
    return resp

def statOperation(url, mgrToken, savePath=‘‘):
    # @函数目的:MGR GET操作
    # @参数说明:mgrToken:管理凭证
    # @参数说明:url:各种类型的mgrURL
    # @返回值:请求应答
    headers = {‘Authorization‘: mgrToken}
    print (‘Stat请求操作的URL为:%s‘ % url)
    resp = requests.get(url, headers=headers)
    print (‘Stat请求响应码:%s‘ % resp.status_code)
    print (‘Stat响应头部:%s‘ % resp.headers)
    print (‘Stat响应内容:%s‘ % resp.text)
    if savePath != ‘‘:
        with open(savePath, "wb") as code:
            code.write(resp.content)  # 按字节写入,避免中文等非ASCII文本写入二进制文件时报错
        print (‘文件成功保存到本地:%s‘ % savePath)
    return resp

def panadaFops(panadaStreamcutUrl, fops_body):
    # @函数目的:发起熊猫视频剪辑请求
    # @参数说明:fops_body:请求消息体
    # @参数说明:panadaStreamcutUrl:熊猫剪辑请求地址
    # @返回值:请求应答
    print u‘视频剪辑的请求body:‘ + fops_body
    print u‘视频剪辑的请求URL:‘ + panadaStreamcutUrl
    resp = requests.post(panadaStreamcutUrl, data=fops_body)
    print ‘状态码code:{0}\n响应头header:{1}\n响应消息体:{2}\n‘.format(str(resp.status_code), resp.headers, resp.text)
    return resp

def panadaStreamCutGetPesistentid(rqStatus, cutStart, cutEnd):
    # @函数目的:从剪辑响应中截取data的json串并解析出persistentId
    # @参数说明:rqStatus:剪辑请求的响应
    # @参数说明:cutStart,cutEnd:json串在响应文本中的截取起止位置
    # @返回值:persistentId
    dataJsonStr = rqStatus.text[cutStart:cutEnd]
    print u‘截取data的json串值:‘ + dataJsonStr
    dataJson = json.loads(dataJsonStr)
    persistentId = dataJson["persistentId"]
    print u‘生成的persistentId:‘ + persistentId
    return persistentId

def getPanadaSign(panadaAuthKey, current_time):
    # @函数目的:生成熊猫鉴权sign,即md5(panadaAuthKey + 当前时间)的32位hex值
    # @参数说明:panadaAuthKey:auth串
    # @参数说明:current_time:当前时间
    # @返回值:sign值
    formd5SignStr = panadaAuthKey + str(current_time)
    print u'用于鉴权生成md5的字符串为:' + formd5SignStr
    sign = get_md5_value_32(formd5SignStr)
    return sign

# 获取文件流大小
def get_io_size(fio):
    """get file size from fio"""
    fio.seek(0, os.SEEK_END)
    fsize = fio.tell()
    fio.seek(0)
    return fsize

# 计算WCS文件HASH值
def getIoWcsEtag(fio):
    """Caculates wcs_etag from file object
    Parameters:
        - fio: file-like object to the file
    Usage:
    >>> data = bytes_chr(0) * (CHUNK_SIZE + 42) * 42
    >>> fio = BytesIO(data)
    >>> print(get_io_wcs_etag(fio))
    lnmoz9lrkr6HWgZyTqu2vD0XUj6R
    Returns wcs_etag
    """
    size = get_io_size(fio)
    flag = CHUNK_BITS
    sha1 = hashlib.sha1
    buf = []
    while size > 0:
        size -= CHUNK_SIZE
        buf.append(sha1(fio.read(CHUNK_SIZE)).digest())
    buf = b‘‘.join(buf)
    if len(buf) > 20:  # more than 1 chunk
        flag |= 0x80
        buf = sha1(buf).digest()
    fio.seek(0)
    return base64.urlsafe_b64encode(bytes_chr(flag) + buf).decode(‘ASCII‘)

# 返回WCS文件HASH值
def getWcsEtag(filename):
    """Caculates wcs_etag
    Parameters:
        - filename: string, file name
    Returns wcs_etag
    """
    with open(filename, 'rb') as fp:
        etag = getIoWcsEtag(fp)
        print ('文件的WCS HASH值为:%s' % etag)
        return etag
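
# 使用示例(示意):对内存中构造的数据直接计算WCS ETag,与getIoWcsEtag的docstring用法对应
def demo_wcs_etag():
    data = bytes_chr(0) * ONE_MB  # 构造1MB的全0数据,仅作演示
    fio = BytesIO(data)
    etag = getIoWcsEtag(fio)
    print '计算得到的WCS ETag:', etag
    return etag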

# 对字符串进行base64安全编码
def base64SafeUrlEncode(str):
    """urlsafe_b64encode base64SafeUrlEncodeStr
    Parameters:
        - str: string, 需要编码的字符串
    Returns base64SafeUrlEncodeStr
    """
    print (‘字符串base64安全编码前的值为:%s‘ % str)
    print (‘字符串base64安全编码后的值为:%s‘ % base64.urlsafe_b64encode(str))
    return base64.urlsafe_b64encode(str)

# 对字符串进行base64安全解码
def base64SafeUrlDecode(base64SafeEncodeStr):
    """urlsafe_b64decode base64SafeUrlDecodeStr
    Parameters:
        - base64SafeEncodeStr: string, 需要解码的字符串
    Returns base64SafeUrlDecodeStr
    """
    print (‘字符串base64安全解码前的值为:%s‘ % base64SafeEncodeStr)
    print (‘字符串base64安全解码后的值为:%s‘ % base64.urlsafe_b64decode(base64SafeEncodeStr))
    return base64.urlsafe_b64decode(base64SafeEncodeStr)

# 对字符串进行base64编码
def base64Encode(str):
    """b64encode b64encodeStr
    Parameters:
        - str: string, 需要编码的字符串
    Returns b64encodeStr
    """
    print (‘字符串base64编码前的值为:%s‘ % str)
    print (‘字符串base64编码后的值为:%s‘ % base64.b64encode(str))
    return base64.b64encode(str)

# 对字符串进行base64解码
def base64Decode(base64EncodeStr):
    """b64decode b64decodeStr
    Parameters:
        - base64EncodeStr: string, 需要解码的字符串
    Returns b64decodeStr
    """
    print (‘字符串base64解码前的值为:%s‘ % base64EncodeStr)
    print (‘字符串base64解码后的值为:%s‘ % base64.b64decode(base64EncodeStr))
    return base64.b64decode(base64EncodeStr)
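
# 使用示例(示意):对比标准base64与URL安全base64两个helper的输出;
# 当编码结果要拼进URL(如saveas参数)时应使用URL安全版本(用-和_代替+和/)
def demo_base64_compare():
    raw = 'example-bucket:demo.mp4'  # 假设的字符串
    print '标准base64编码:  ', base64Encode(raw)
    print 'URL安全base64编码:', base64SafeUrlEncode(raw)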

# 获取文件大小
def getFileSize(path):
    try:
        size = os.path.getsize(path)
        print (‘文件大小为:%s‘ % size)
        return size
    except Exception as err:
        print(err)

# 生成上传文件名
def generateUploadFileName(prefix, middleName, suffix):
    fileName = prefix + middleName + suffix
    print (‘上传文件名称为:%s‘ % fileName)
    return fileName

# 生成下载路径
def generateDownUrl(downUri, fileName):
    downloadUrl = downUri + os.path.sep + fileName
    print (‘文件下载URL为:%s‘ % downloadUrl)
    return downloadUrl

# 加载JSON
def jsonReload(text):
    rqStatResponseJson = json.loads(text)
    print (‘json加载之后:%s‘ % str(rqStatResponseJson))
    return rqStatResponseJson

# 获取文件保存路径
def getFilePath(fileDir, fileName):
    filePath = fileDir + fileName
    if ‘test_download‘ in filePath:
        print (‘保存文件路径为:%s‘ % filePath)
    else:
        print (‘对比文件路径为:%s‘ % filePath)
    return filePath

def downloadFileUrllib2(url, savePath):
    # @函数目的: 下载文件,使用urlLib2模块下载
    # @参数说明:url:文件的url路径
    # @参数说明:savePath:文件保存的本地路径
    # @返回值
    try:
        url = url.strip()
        print (‘文件下载地址为:%s‘ % url)
        savePath = savePath.strip()
        print (‘文件保存本地路径:%s‘ % savePath)
        r = urllib2.Request(url)
        req = urllib2.urlopen(r)
        saveFile = open(savePath, ‘wb‘)
        saveFile.write(req.read())
        saveFile.close()
        req.close()
    except Exception, e:
        print str(e)

def downloadFileRequests(url, savePath):
    # @函数目的:下载文件,使用requests模块下载
    # @参数说明:url:文件的URL路径
    # @参数说明:savePath:文件保存的本地路径
    # @返回值
    try:
        url = url.strip()
        print (‘文件URL地址为:%s‘ % url)
        savePath = savePath.strip()
        print (‘文件保存本地路径:%s‘ % savePath)
        r = requests.get(url)
        print (‘下载请求响应码:%s‘ % r.status_code)
        print (‘下载响应头部:%s‘ % r.headers)
        with open(savePath, "wb") as code:
            code.write(r.content)
        print (‘文件成功保存到本地:%s‘ % savePath)
        return r
    except Exception, e:
        print str(e)

def postFileRequests(url, savePath):
    # @函数目的:发送post请求并将响应内容保存到本地,使用requests模块
    # @参数说明:url:文件的URL路径
    # @参数说明:savePath:文件保存的本地路径
    # @返回值
    try:
        url = url.strip()
        print (‘文件URL地址为:%s‘ % url)
        savePath = savePath.strip()
        print (‘文件保存本地路径:%s‘ % savePath)
        r = requests.post(url)
        print (‘下载请求响应码:%s‘ % r.status_code)
        print (‘下载响应头部:%s‘ % r.headers)
        with open(savePath, "wb") as code:
            code.write(r.content)
        print (‘文件成功保存到本地:%s‘ % savePath)
        return r
    except Exception, e:
        print str(e)

def getRequestWithoutSave(url):
    # @函数目的:单纯get请求,不进行下载
    # @参数说明:url:文件的URL路径
    # @返回值 下载响应
    url = url.strip()
    print (‘文件URL地址为:%s‘ % url)
    # r = requests.get(url, encoding=‘ISO-8859-1‘)
    r = requests.get(url)
    print (‘下载请求响应码:%s‘ % r.status_code)
    print (‘下载响应头部:%s‘ % r.headers)
    # print (‘下载响应内容类型:%s‘ % type(r.content))
    # print (‘下载响应内容:%s‘ % r.content)
    return r

def options(url):
    # @函数目的:options请求
    # @参数说明:url:文件的URL路径
    # @返回值 下载响应
    url = url.strip()
    print (‘文件URL地址为:%s‘ % url)
    rq = requests.options(url)
    print ‘状态码code:{0}\n响应头header:{1}\n响应消息体:{2}\n‘.format(str(rq.status_code), rq.headers, rq.text)
    return rq

def reMatch(reRule, str):
    # @函数目的:判断字符串是否匹配某正则表达式
    # @参数说明:reRule:正则表达式, str 字符串
    # @返回值 判断匹配结果
    an = re.match(reRule, str)
    print an
    if an:
        print u‘匹配成功‘
        return True
    else:
        print u‘匹配失败‘
        return False

def downloadFileRequestsWithHeaders(url, savePath, headers):
    # @函数目的:range下载文件,使用requests模块下载
    # @参数说明:url:文件的URL路径
    # @参数说明:savePath:文件保存的本地路径
    # @参数说明:headers:下载带的头部
    # @返回值
    try:
        url = url.strip()
        print (‘文件URL地址为:%s‘ % url)
        savePath = savePath.strip()
        print (‘文件保存本地路径:%s‘ % savePath)
        print (‘下载附加的HEADER内容为:%s‘ % headers)
        r = requests.get(url, headers=headers)
        print (‘下载请求响应码:%s‘ % r.status_code)
        print (‘下载响应头部:%s‘ % r.headers)
        with open(savePath, "wb") as code:
            code.write(r.content)
        print (‘文件成功保存到本地:%s‘ % savePath)
        return r
    except Exception, e:
        print str(e)

def headFileRequests(url):
    # @函数目的:获取文件head信息
    # @参数说明:url:文件的URL路径
    # @返回值
    url = url.strip()
    print (‘文件URL地址为:%s‘ % url)
    r = requests.head(url)
    print (‘HEAD请求响应码:%s‘ % r.status_code)
    print (‘HEAD响应头部:%s‘ % r.headers)
    return r

def headFileRequestsWithHeader(url, headers):
    # @函数目的:获取文件head信息
    # @参数说明:url:文件的URL路径
    # @返回值
    url = url.strip()
    print (‘文件URL地址为:%s‘ % url)
    print (‘HEAD附加的HEADER内容为:%s‘ % headers)
    r = requests.head(url, headers=headers)
    print (‘HEAD请求响应码:%s‘ % r.status_code)
    print (‘HEAD响应头部:%s‘ % r.headers)
    return r

# putPolicy 参数生成
def get_putPolicy(**kwargs):
    putPolicydict = {}
    for value in kwargs:
        putPolicydict[value] = kwargs[value]
    return putPolicydict

# 获取分片文件的信息,返回每个片的大小,及总文件大小
def Get_File_Size(filepath):
    file_dict = {}
    total = 0
    # 目录应为只包含分片文件的平铺目录(不含子目录),取第一层的文件列表即可
    for root, dirs, files in os.walk(filepath):
        break
    for i in files:
        file_0 = os.path.getsize(os.path.join(root, i))
        file_dict[i] = file_0
        total += file_0
    return file_dict, total

# 用于统计计算每个块分配的上传分片
‘‘‘
    @blk 为预设好的上传的块数
    @filepath 为分片文件的目录
    错误结果都返回-1
‘‘‘

def Count_Blk(blk, filepath):
    return_msg = {}
    file_info = Get_File_Size(filepath)  # 返回的文件夹内的分片文件信息:每个片的名字,每片的大小,总大小
    file_num = len(file_info[0])  # 总共几个分片文件
    multiple = file_num / blk  # 倍数
    remainder = file_num % blk  # 计算分片中总数和块数的余数
    # 如果分片总数比分片的块数少,返回错误结果
    if file_num < blk:
        return_msg[‘msg‘] = u‘分片总数不能少于分块数‘
        return_msg[‘code‘] = -1
        return return_msg  # 分片总数少于分块数,返回错误码-1
    # 处理分片总数在blk和2倍blk之间的情况
    elif file_num > blk and file_num < blk * 2:
        fist_blk = []  # 用于写入分片的名称
        difference_value = blk - remainder  # 计算分块数和余数的差值,通过该值判断是否需要将第一块的分片数减少
        if difference_value == 1:  # difference_value为1的时候,表示剩下的余数每个作为一个分片
            for i in range(blk):
                fist_blk.append(i)
            return_msg[str(1)] = fist_blk  # 将每一块的分片名称写入字段,key表示第几块,value值表示该块的分片片数,以list存在,本处写第一块
            for j in range(1, remainder + 1):  # 取余数值,每个余数加上块数值等于该位置的块的分片
                return_msg[str(j + 1)] = [blk + j - 1]
            return return_msg
        else:  # difference_value不为1的时候,根据块数和余数的差值,计算需要从第一块中减少几片分给其他的块
            fist_num = file_num - blk + 1  # 计算第一块应该得到的分片数
            for i in range(fist_num):
                fist_blk.append(i)
            return_msg[str(1)] = fist_blk  # 将每一块的分片名称写入字段,key表示第几块,value值表示该块的分片片数,以list存在,本处写第一块
            for j in range(1, blk):  # 取余数值,每个余数加上块数值等于该位置的块的分片
                return_msg[str(j + 1)] = [fist_num + j - 1]
            return return_msg
    # 处理分块数等于分片数
    elif file_num == blk:
        for i in range(1, blk + 1):  # 分片数等于块数,即每片为一块
            return_msg[str(i)] = [i - 1]
        return return_msg
    # 处理分片数大于两倍的分块数,且不能被分块数整除
    elif file_num > blk * 2 and remainder != 0:
        for i in range(1, blk + 1):
            chunk_list = []
            append_num = (i - 1) * multiple  # 偏移量,第i块的分片编号相对第一块整体后移(i-1)*multiple
            if i < blk:  # 块数的位移值比分块数小的时候计算,(即除最后一块外,前面几块的分片都是平均的)
                for j in range(multiple):
                    chunk_list.append(j + append_num)
                return_msg[str(i)] = chunk_list
            else:  # 最后一块的处理,最后一块的分片数会比其他块多出一些
                el_num = file_num - append_num  # 总数减去此处的递增值,为最后一个块要循环的片数
                for x in range(el_num):
                    chunk_list.append(x + append_num)
                return_msg[str(i)] = chunk_list
        return return_msg
    # 处理分片数刚好是分块数的倍数
    elif remainder == 0 and file_num != blk:
        for i in range(1, blk + 1):
            chunk_list = []
            append_num = (i - 1) * multiple  # 偏移量,第i块的分片编号相对第一块整体后移(i-1)*multiple
            for j in range(multiple):
                chunk_list.append(j + append_num)
            return_msg[str(i)] = chunk_list
        return return_msg
    else:
        return ‘other error‘

‘‘‘
计算每块所需的块大小
@ chunk_info 分片文件的信息和文件总大小
@ mkbl_num 分几块上传
@ blk_info 块和分片数量关系列表
‘‘‘

def blk_slicenum_count(chunk_info, mkbl_num, blk_info):
    total_list = []  # 按块的顺序添加每个块所需上传的分片数,
    sum_list = []  # 按块的顺序添加每块的需要开辟的总大小
    info = chunk_info[0]  # 每个块的大小信息
    for i in range(1, int(mkbl_num) + 1):  # 通过循环取出每个块的分片文件,按list放入到total_list 中
        total_list.append(blk_info.get(str(i)))
    for j in total_list:  # 从total_list中按块顺序取出片的list
        sum = 0  # 用于统计每块的大小,一块统计结束后,第二块开始该值置0
        for x in j:  # 循环取出每分片的大小,按名称获取大小,然后进行sum合计,并放入list中
            a = info.get(str(x))
            sum += a
        sum_list.append(sum)
    return sum_list
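
# 使用示例(示意):假设/tmp/slices目录下已切好若干分片文件,且分片文件以0、1、2...的纯数字命名
# (与blk_slicenum_count中info.get(str(x))的取法对应);按3块上传时,先用Count_Blk计算每块
# 分到的分片编号,再用blk_slicenum_count计算每块需要开辟的大小
def demo_blk_count():
    slice_dir = '/tmp/slices'              # 假设的分片目录
    chunk_info = Get_File_Size(slice_dir)  # (每片大小dict, 总大小)
    blk_info = Count_Blk(3, slice_dir)     # 每块 -> 分片编号列表
    print '块与分片的分配关系:', blk_info
    if isinstance(blk_info, dict) and blk_info.get('code') != -1:
        sum_list = blk_slicenum_count(chunk_info, 3, blk_info)
        print '每块需要开辟的大小:', sum_list
        return sum_list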

# 上传档案共通方法
def archive_post(filepath, upload_token, requesturl, urlpath, ak, sk, date, description, fileSha256Hash,
                 fileTreeSha256Hash):
    header = {}
    header[‘Authorization‘] = upload_token
    header[‘Date‘] = date
    header[‘x-was-archive-description‘] = description
    header[‘x-was-content-sha256‘] = fileSha256Hash
    header[‘x-was-sha256-tree-hash‘] = fileTreeSha256Hash
    print u‘请求头部:\n‘, header

    url = requesturl + urlpath
    print u‘请求的url信息:\n‘, url
    with open(filepath, 'rb') as file:
        data = file.read()
        rq = requests.post(url=url, headers=header, data=data)
    showRes(rq)
    # 校验请求返回的响应状态码,打印返回头部内容
    result = asserResult_201(rq)
    if result == ‘SUCCESS‘:
        archiveid = rq.headers[‘x-was-archive-id‘]
        print u‘>>>>上传档案成功,archiveid为:‘, archiveid
    else:
        print u'>>>>上传档案失败!'
        archiveid = ''
    return archiveid

# 上传档案共通方法
def archive_post_rq(filepath, upload_token, requesturl, urlpath, ak, sk, date, description, fileSha256Hash,
                    fileTreeSha256Hash):
    header = {}
    header[‘Authorization‘] = upload_token
    header[‘Date‘] = date
    header[‘x-was-archive-description‘] = description
    header[‘x-was-content-sha256‘] = fileSha256Hash
    header[‘x-was-sha256-tree-hash‘] = fileTreeSha256Hash
    print u‘请求头部:\n‘, header

    url = requesturl + urlpath
    print u‘请求的url信息:\n‘, url
    with open(filepath, 'rb') as file:
        data = file.read()
        rq = requests.post(url=url, headers=header, data=data)
    showRes(rq)
    return rq

def asserResult_201(rq):
    # 展示响应信息
    status = str(rq.status_code)
    print ‘status:‘, status
    if status == ‘201‘:
        return ‘SUCCESS‘
    else:
        return ‘FAIL‘

def get_inventory_jobid_nofile(rq):
    # 用json解析响应内容获取job id,避免直接eval响应文本
    message = json.loads(rq.text)
    jobid = message.get('x-was-job-id')
    return jobid

# 上传文件所需的token
‘‘‘
@urlpath upload请求的参数
@RequestParams 请求数据内容,带入为字典
@ak,sk  系统的aksk 信息
‘‘‘

def generate_token_upload(putPolicy, ak, sk, ):
    postdata = JSONEncoder().encode(putPolicy)  # 将字典转成json 格式,不然接口解析不了
    print ‘json格式的putPolicy信息:\n‘, postdata
    encodePutPolicy = base64.b64encode(postdata)
    print u‘鉴权信息encodePutPolicy:\n‘, ‘\n‘ + encodePutPolicy + ‘\n‘
    my_sign = hmac.new(sk, encodePutPolicy, sha1).hexdigest()
    encodedSign = base64.b64encode(my_sign)
    token = ak + ‘:‘ + encodedSign + ‘:‘ + encodePutPolicy
    print u‘生成的token信息:\n‘, token
    return token
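
# 使用示例(示意):先用get_putPolicy组装上传策略,再生成上传token;
# scope、deadline、overwrite为常见的putPolicy字段名,具体以接口文档为准,AK/SK为假设值
def demo_generate_token_upload():
    put_policy = get_putPolicy(scope='example-bucket:demo.txt',
                               deadline=str(int(time.time()) + 3600),
                               overwrite=1)
    token = generate_token_upload(put_policy, 'exampleAK', 'exampleSK')
    return token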

# 查询DB操作
def excute_sql(database, user, password, host, port, sqlStr):
    # 数据库连接参数
    conn = psycopg2.connect(database=database, user=user, password=password, host=host, port=port)
    cur = conn.cursor()
    cur.execute(sqlStr)
    rows = cur.fetchall()  # all rows in table
    print u‘展示查询结果:‘
    print(rows)
    conn.commit()
    cur.close()
    conn.close()
    return rows
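
# 使用示例(示意):查询DB,连接参数与SQL均为假设的示例值,实际按各环境配置填写
def demo_excute_sql():
    rows = excute_sql(database='exampledb', user='postgres', password='example',
                      host='127.0.0.1', port='5432',
                      sqlStr='select * from example_table limit 10;')
    return rows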

# 更新DB操作
def excute_sql_update(database, user, password, host, port, sqlStr):
    # 数据库连接参数
    print u‘sql语句为:‘, sqlStr
    conn = psycopg2.connect(database=database, user=user, password=password, host=host, port=port)
    cur = conn.cursor()
    cur.execute(sqlStr)
    conn.commit()
    cur.close()
    conn.close()

‘‘‘
 @查询redis操作
 @默认op=0,查询
 @op=1,删除
‘‘‘

def excute_redis(host, port, password, keyStr, op=0):
    '''
    这种连接方式每次调用都会新建连接,用完即断,比较耗资源;端口默认6379,可不传
    '''
    r = redis.Redis(host=host, port=port, password=password)
    if op == 0:
        return r.hgetall(keyStr)
    else:
        return r.delete(keyStr)
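
# 使用示例(示意):查询redis中的hash key,连接参数与key均为假设值;删除时传op=1
def demo_excute_redis():
    hash_value = excute_redis('127.0.0.1', 6379, 'examplePassword', 'example:hash:key')
    print '查询到的hash内容:', hash_value
    # excute_redis('127.0.0.1', 6379, 'examplePassword', 'example:hash:key', op=1)  # 删除该key
    return hash_value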

def compareFiles(getFilePath, diffFilePath):
    # 对比两个文件是否一致
    print ‘下载结果文件:‘, getFilePath
    print ‘用于对比文件:‘, diffFilePath
    cmpResult = filecmp.cmp(getFilePath, diffFilePath)
    print ‘两个文件是否一致‘, cmpResult
    return cmpResult

def getBucket(headers, s3url):
    # S3请求获取bucket信息
    print ‘请求headers: ‘, headers
    print ‘s3url: ‘, s3url
    rq = requests.get(url=s3url, headers=headers)
    return rq

def showRes(rq):
    status = str(rq.status_code)
    print ‘HTTP Status Code: ‘, status
    print ‘返回结果:‘, rq.text
    print ‘响应头部:‘, rq.headers

def forWait(waitTime):
    # 设置等待时间
    print (‘等待处理完成,等待时间为(秒):%s‘ % waitTime)
    time.sleep(waitTime)

def showRespose(rq, getFileName):
    # S3展示响应信息
    status = str(rq.status_code)
    print ‘HTTP Status Code: ‘, status
    print ‘返回结果:‘, rq.text
    print ‘响应头部:‘, rq.headers
    # 将返回结果保存到本地
    print ‘保存文件路径:‘, getFileName
    f = open(getFileName, ‘w+‘)
    print >> f, rq.text
    f.close()

def asserResult(rq, getFileName, diffFileName):
    # S3展示响应信息
    print ‘对比文件路径:‘, diffFileName
    status = str(rq.status_code)
    diff_command = "diff " + getFileName + " " + diffFileName
    xml_compare = os.popen(diff_command)
    diff = xml_compare.read()
    print ‘比对结果‘, diff
    if status == ‘200‘ and diff == ‘‘:
        return ‘Success‘
    else:
        return ‘Fail‘

def excuteSuiteExportReport(reportFileName, suite):
    fr = open(reportFileName, ‘wb‘)
    report = HTMLTestRunner.HTMLTestRunner(stream=fr, verbosity=2, title=‘测试报告‘, description=‘测试报告详情‘)
    test_result = report.run(suite)
    return test_result

def analysisResult(test_result):
    failure_count = test_result.failure_count
    error_count = test_result.error_count
    print("用例执行失败个数为%s" % failure_count)
    print("用例执行错误个数为%s" % error_count)
    if failure_count == 0 and error_count == 0:
        print "正常退出"
    else:
        print "异常退出"
        raise Exception("用例执行失败")
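
# 使用示例(示意):组装测试套件并执行,生成HTML报告后分析结果;
# ExampleTestCase为假设的用例类,实际使用时替换为项目中的用例后再执行
def demo_run_suite():
    import unittest
    suite = unittest.TestSuite()
    # suite.addTests(unittest.TestLoader().loadTestsFromTestCase(ExampleTestCase))
    test_result = excuteSuiteExportReport(curDir + 'report.html', suite)
    analysisResult(test_result)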

def main():
    print ‘pass‘

if __name__ == ‘__main__‘:
    main()

原文地址:https://www.cnblogs.com/channy14/p/11547269.html
