Python magnet-link search engine source code released, based on the DHT protocol

Back when I wrote the Baidu Netdisk crawler and the Baidu Images crawler, I promised readers that I would find time to release the source code of ok搜搜. It is time to keep that promise: below is the complete crawler code, published fully and openly. You can use it whether or not you write programs yourself. First install a Linux system on a machine with a public IP, then run:

python startCrawler.py
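
Before launching, you may want to sanity-check that the third-party modules the crawler relies on are installed. A minimal sketch; the module names are taken straight from the imports below, and nothing else is assumed:

#!/usr/bin/env python
# pre-flight check: are the crawler's third-party dependencies importable?
for mod in ('MySQLdb', 'pygeoip', 'bencode'):
    try:
        __import__(mod)
        print mod, 'ok'
    except ImportError:
        print mod, 'missing -- install it before starting the crawler'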

One reminder: all the database fields appear in the code, so please create the table yourself; it is simple enough that I will not spell it out, but a rough sketch follows below. I am also providing download links for the source: Download link 1, Download link 2.
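
For reference, here is a minimal sketch of that search_hash table, inferred from the INSERT and UPDATE statements in the code below; the column types and lengths are my own assumptions, so adjust them to your data:

# a sketch only: creates the search_hash table the crawler writes to;
# column types/lengths are assumptions inferred from the INSERT statement below
import MySQLdb as mdb

conn = mdb.connect('127.0.0.1', 'root', 'root', 'oksousou', charset='utf8')
cur = conn.cursor()
cur.execute('''CREATE TABLE IF NOT EXISTS search_hash (
    id INT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY,
    info_hash CHAR(40) NOT NULL UNIQUE,
    category VARCHAR(32),
    data_hash CHAR(32),
    name VARCHAR(255),
    extension VARCHAR(32),
    classified TINYINT(1) DEFAULT 0,
    source_ip VARCHAR(15),
    tagged TINYINT(1) DEFAULT 0,
    length BIGINT,
    create_time DATETIME,
    last_seen DATETIME,
    requests INT DEFAULT 1
) DEFAULT CHARSET=utf8''')
conn.commit()
conn.close()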

#!/usr/bin/env python
# encoding: utf-8
"""
author:haoning
create time:2015.8.1
"""
import hashlib
import os
import time
import datetime
import traceback
import sys
import random
import json
import socket
import threading
from hashlib import sha1  # SHA-1 hashing
from random import randint
from struct import unpack
from socket import inet_ntoa
from threading import Timer, Thread
from time import sleep
from collections import deque
from Queue import Queue

import MySQLdb as mdb  # MySQL connector

import metautils
import downloadTorrent
from bencode import bencode, bdecode
import pygeoip

DB_HOST = '127.0.0.1'
DB_USER = 'root'
DB_PASS = 'root'

# well-known public DHT bootstrap nodes
BOOTSTRAP_NODES = (
    ("67.215.246.10", 6881),
    ("82.221.103.244", 6881),
    ("23.21.224.150", 6881)
)
RATE = 1  # throttling rate (re-tuned dynamically in on_message)
TID_LENGTH = 2
RE_JOIN_DHT_INTERVAL = 3
TOKEN_LENGTH = 2
INFO_HASH_LEN = 500000  # 500k entries is small; caps the queue so memory use stays bounded
CACHE_LEN = 100  # size of the database-update cache
WAIT_DOWNLOAD = 80  # threshold for launching queued metadata downloads

geoip = pygeoip.GeoIP('GeoIP.dat')

def is_ip_allowed(ip):
    country = geoip.country_code_by_addr(ip)
    if country in ('CN', 'TW', 'JP', 'HK', 'KR'):
        return True
    return False

def entropy(length):
    return "".join(chr(randint(0, 255)) for _ in xrange(length))

def random_id():
    h = sha1()
    h.update(entropy(20))
    return h.digest()

def decode_nodes(nodes):
    # compact node info: each entry is 26 bytes
    # (20-byte node ID + 4-byte IPv4 address + 2-byte big-endian port)
    n = []
    length = len(nodes)
    if (length % 26) != 0:
        return n

    for i in range(0, length, 26):
        nid = nodes[i:i+20]
        ip = inet_ntoa(nodes[i+20:i+24])
        port = unpack("!H", nodes[i+24:i+26])[0]
        n.append((nid, ip, port))

    return n

def timer(t, f):
    Timer(t, f).start()

def get_neighbor(target, nid, end=10):
    # forge an ID that shares its first `end` bytes with the target, so other
    # nodes believe we are close to it and route traffic our way
    return target[:end] + nid[end:]

class KNode(object):

    def __init__(self, nid, ip, port):
        self.nid = nid
        self.ip = ip
        self.port = port

class DHTClient(Thread):

    def __init__(self, max_node_qsize):
        Thread.__init__(self)
        self.setDaemon(True)
        self.max_node_qsize = max_node_qsize
        self.nid = random_id()
        self.nodes = deque(maxlen=max_node_qsize)

    def send_krpc(self, msg, address):
        try:
            self.ufd.sendto(bencode(msg), address)
        except Exception:
            pass

    def send_find_node(self, address, nid=None):
        nid = get_neighbor(nid, self.nid) if nid else self.nid
        tid = entropy(TID_LENGTH)
        msg = {
            "t": tid,
            "y": "q",
            "q": "find_node",
            "a": {
                "id": nid,
                "target": random_id()
            }
        }
        self.send_krpc(msg, address)

    def join_DHT(self):
        for address in BOOTSTRAP_NODES:
            self.send_find_node(address)

    def re_join_DHT(self):
        if len(self.nodes) == 0:
            self.join_DHT()
        timer(RE_JOIN_DHT_INTERVAL, self.re_join_DHT)

    def auto_send_find_node(self):
        wait = 1.0 / self.max_node_qsize
        while True:
            try:
                node = self.nodes.popleft()
                self.send_find_node((node.ip, node.port), node.nid)
            except IndexError:
                pass
            try:
                sleep(wait)
            except KeyboardInterrupt:
                os._exit(0)

    def process_find_node_response(self, msg, address):
        nodes = decode_nodes(msg["r"]["nodes"])
        for node in nodes:
            (nid, ip, port) = node
            if len(nid) != 20: continue
            if ip == self.bind_ip: continue
            n = KNode(nid, ip, port)
            self.nodes.append(n)

class DHTServer(DHTClient):  # listens for queries and harvests info_hashes

    def __init__(self, master, bind_ip, bind_port, max_node_qsize):
        DHTClient.__init__(self, max_node_qsize)

        self.master = master
        self.bind_ip = bind_ip
        self.bind_port = bind_port
        self.speed=0

        self.process_request_actions = {
            "get_peers": self.on_get_peers_request,
            "announce_peer": self.on_announce_peer_request,
        }

        self.ufd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        self.ufd.bind((self.bind_ip, self.bind_port))

        timer(RE_JOIN_DHT_INTERVAL, self.re_join_DHT)

    def run(self):
        self.re_join_DHT()
        while True:
            try:
                (data, address) = self.ufd.recvfrom(65536)
                msg = bdecode(data)
                self.on_message(msg, address)
            except Exception:
                pass

    def on_message(self, msg, address):
        global RATE  # RATE is shared at module level
        try:
            if msg["y"] == "r":
                if msg["r"].has_key("nodes"):
                    self.process_find_node_response(msg, address) #发现节点
            elif msg["y"] == "q":
                try:
                    self.speed += 1
                    if self.speed % 10000 == 0:
                        # re-roll the sampling rate every 10000 queries;
                        # RATE ends up as 1, 1 or 10 (handle all, all, or 1 in 10)
                        RATE = random.randint(1, 3)
                        if RATE == 2:
                            RATE = 1
                        if RATE == 3:
                            RATE = 10
                        if self.speed > 100000:
                            self.speed = 0
                    if self.speed % RATE == 0:  # too much traffic eats the CPU, so only a sample of queries is handled
                        self.process_request_actions[msg["q"]](msg, address)  # handling peers' requests is where info_hashes are obtained
                except KeyError:
                    self.play_dead(msg, address)
        except KeyError:
            pass

    def on_get_peers_request(self, msg, address):
        try:
            infohash = msg["a"]["info_hash"]
            tid = msg["t"]
            nid = msg["a"]["id"]
            token = infohash[:TOKEN_LENGTH]
            msg = {
                "t": tid,
                "y": "r",
                "r": {
                    "id": get_neighbor(infohash, self.nid),
                    "nodes": "",
                    "token": token
                }
            }
            self.master.log(infohash, address)
            self.send_krpc(msg, address)
        except KeyError:
            pass

    def on_announce_peer_request(self, msg, address):
        try:
            infohash = msg["a"]["info_hash"]
            token = msg["a"]["token"]
            nid = msg["a"]["id"]
            tid = msg["t"]

            if infohash[:TOKEN_LENGTH] == token:
                if msg["a"].has_key("implied_port ") and msg["a"]["implied_port "] != 0:
                    port = address[1]
                else:
                    port = msg["a"]["port"]
                self.master.log_announce(infohash, (address[0], port))
        except Exception:
            print 'error'
        finally:
            self.ok(msg, address)

    def play_dead(self, msg, address):
        try:
            tid = msg["t"]
            msg = {
                "t": tid,
                "y": "e",
                "e": [202, "Server Error"]
            }
            self.send_krpc(msg, address)
        except KeyError:
            pass

    def ok(self, msg, address):
        try:
            tid = msg["t"]
            nid = msg["a"]["id"]
            msg = {
                "t": tid,
                "y": "r",
                "r": {
                    "id": get_neighbor(nid, self.nid)
                }
            }
            self.send_krpc(msg, address)
        except KeyError:
            pass

class Master(Thread):  # resolves info_hashes into torrent metadata

    def __init__(self):
        Thread.__init__(self)
        self.setDaemon(True)
        self.queue = Queue()
        self.cache = Queue()
        self.count=0
        self.mutex = threading.RLock()  # reentrant lock: the same thread may re-acquire it
        self.waitDownload = Queue()
        self.metadata_queue = Queue()
        self.dbconn = mdb.connect(DB_HOST, DB_USER, DB_PASS, 'oksousou', charset='utf8')
        self.dbconn.autocommit(False)
        self.dbcurr = self.dbconn.cursor()
        self.dbcurr.execute('SET NAMES utf8')
        self.visited = set()

    def lock(self):  # acquire
        self.mutex.acquire()

    def unlock(self):  # release
        self.mutex.release()

    def work(self, item):

        print "start thread", item
        while True:
            self.prepare_download_metadata()
            self.lock()
            self.download_metadata()
            self.unlock()

            self.lock()
            self.got_torrent()
            self.unlock()

    def start_work(self, max):

        for item in xrange(max):
            t = threading.Thread(target=self.work, args=(item,))
            t.setDaemon(True)
            t.start()

    # queueing incoming hashes and processing them later is more efficient
    def log_announce(self, binhash, address=None):
        if self.queue.qsize() < INFO_HASH_LEN:  # above INFO_HASH_LEN stop enqueueing, or downstream can't keep up
            if is_ip_allowed(address[0]):
                self.queue.put([address, binhash])  # got an info_hash

    def log(self, infohash, address=None):
        if self.queue.qsize() < INFO_HASH_LEN:  # same bound as log_announce
            if is_ip_allowed(address[0]):
                self.queue.put([address, infohash])

    def prepare_download_metadata(self):

        if self.queue.qsize() == 0:
            sleep(2)
        # take an info_hash off the queue for downloading
        address, binhash = self.queue.get()
        if binhash in self.visited:
            return
        if len(self.visited) > 100000:  # above 100000 entries, reset the set and treat everything as unvisited
            self.visited = set()
        self.visited.add(binhash)  # record this info_hash as visited
        utcnow = datetime.datetime.utcnow()

        self.cache.put((address, binhash, utcnow))  # push into the cache queue

    def download_metadata(self):

        if self.cache.qsize() > CACHE_LEN/2:  # flush once the cache is half full
            while self.cache.qsize() > 0:  # drain the queue
                address, binhash, utcnow = self.cache.get()
                info_hash = binhash.encode('hex')
                self.dbcurr.execute('SELECT id FROM search_hash WHERE info_hash=%s', (info_hash,))
                y = self.dbcurr.fetchone()
                if y:
                    # already known: update last-seen time and request count
                    self.dbcurr.execute('UPDATE search_hash SET last_seen=%s, requests=requests+1 WHERE info_hash=%s', (utcnow, info_hash))
                else:
                    self.waitDownload.put((address, binhash))
            self.dbconn.commit()
            if self.waitDownload.qsize() > WAIT_DOWNLOAD:
                while self.waitDownload.qsize() > 0:
                    address, binhash = self.waitDownload.get()
                    t = threading.Thread(target=downloadTorrent.download_metadata, args=(address, binhash, self.metadata_queue))
                    t.setDaemon(True)
                    t.start()

    def decode(self, s):
        if type(s) is list:
            s = ';'.join(s)
        u = s
        for x in (self.encoding, 'utf8', 'gbk', 'big5'):
            try:
                u = s.decode(x)
                return u
            except:
                pass
        return s.decode(self.encoding, 'ignore')

    def decode_utf8(self, d, i):
        if i+'.utf-8' in d:
            return d[i+'.utf-8'].decode('utf8')
        return self.decode(d[i])

    def parse_metadata(self, data):  # parse the torrent metadata
        info = {}
        self.encoding = 'utf8'
        try:
            torrent = bdecode(data)  # bdecode the raw metadata
            if not torrent.get('name'):
                return None
        except:
            return None
        detail = torrent
        info['name'] = self.decode_utf8(detail, 'name')
        if 'files' in detail:
            info['files'] = []
            for x in detail['files']:
                if 'path.utf-8' in x:
                    v = {'path': self.decode('/'.join(x['path.utf-8'])), 'length': x['length']}
                else:
                    v = {'path': self.decode('/'.join(x['path'])), 'length': x['length']}
                if 'filehash' in x:
                    v['filehash'] = x['filehash'].encode('hex')
                info['files'].append(v)
            info['length'] = sum([x['length'] for x in info['files']])
        else:
            info['length'] = detail['length']
        info['data_hash'] = hashlib.md5(detail['pieces']).hexdigest()
        return info

    def got_torrent(self):
        if self.metadata_queue.qsize() == 0:
            return
        binhash, address, data, start_time = self.metadata_queue.get()
        if not data:
            return
        try:
            info = self.parse_metadata(data)
            if not info:
                return
        except:
            traceback.print_exc()
            return

        temp = time.time()
        x = time.localtime(float(temp))
        utcnow = time.strftime("%Y-%m-%d %H:%M:%S", x)  # current time, formatted

        info_hash = binhash.encode('hex')  # hex digest used in the magnet link
        info['info_hash'] = info_hash
        # need to build tags
        info['tagged'] = False
        info['classified'] = False
        info['requests'] = 1
        info['last_seen'] = utcnow
        info['create_time'] = utcnow
        info['source_ip'] = address[0]

        if info.get('files'):
            files = [z for z in info['files'] if not z['path'].startswith('_')]
            if not files:
                files = info['files']
        else:
            files = [{'path': info['name'], 'length': info['length']}]
        files.sort(key=lambda z: z['length'], reverse=True)
        bigfname = files[0]['path']
        info['extension'] = metautils.get_extension(bigfname).lower()
        info['category'] = metautils.get_category(info['extension'])

        try:
            try:
                print '\n', 'Saved', info['info_hash'], info['name'], (time.time()-start_time), 's', address[0]
            except:
                print '\n', 'Saved', info['info_hash']
            ret = self.dbcurr.execute('INSERT INTO search_hash(info_hash,category,data_hash,name,extension,classified,source_ip,tagged,' +
                'length,create_time,last_seen,requests) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)',
                (info['info_hash'], info['category'], info['data_hash'], info['name'], info['extension'], info['classified'],
                info['source_ip'], info['tagged'], info['length'], info['create_time'], info['last_seen'], info['requests']))
            self.count += 1
            if self.count % 50 == 0:  # commit in batches of 50
                self.dbconn.commit()
                if self.count > 100000:
                    self.count = 0
        except:
            print self.name, 'save error', info
            traceback.print_exc()
            return

if __name__ == "__main__":

    # start the master (torrent resolver)
    master = Master()
    master.start_work(150)

    # start the DHT server
    dht = DHTServer(master, "0.0.0.0", 6881, max_node_qsize=200)
    dht.start()
    dht.auto_send_find_node()

Note that the code above depends on a module that downloads the actual torrent metadata, so the following code (imported above as downloadTorrent) is essential:

#!/usr/bin/env python
# encoding: utf-8
"""
author:haoning
create time:2015.8.1
"""
from hashlib import sha1
import math
from socket import inet_ntoa
import socket
from struct import pack, unpack
from threading import Timer, Thread
from time import sleep, time

from bencode import bencode, bdecode
from startCrawler import entropy

BT_PROTOCOL = "BitTorrent protocol"
BT_MSG_ID = 20
EXT_HANDSHAKE_ID = 0

def random_id():
    h = sha1()
    h.update(entropy(20))
    return h.digest()

def send_packet(the_socket, msg):
    the_socket.send(msg)

def send_message(the_socket, msg):
    msg_len = pack(">I", len(msg))
    send_packet(the_socket, msg_len + msg)

def send_handshake(the_socket, infohash):
    bt_header = chr(len(BT_PROTOCOL)) + BT_PROTOCOL
    ext_bytes = "\x00\x00\x00\x00\x00\x10\x00\x00"
    peer_id = random_id()
    packet = bt_header + ext_bytes + infohash + peer_id

    send_packet(the_socket, packet)

def check_handshake(packet, self_infohash):
    try:
        bt_header_len, packet = ord(packet[:1]), packet[1:]
        if bt_header_len != len(BT_PROTOCOL):
            return False
    except TypeError:
        return False

    bt_header, packet = packet[:bt_header_len], packet[bt_header_len:]
    if bt_header != BT_PROTOCOL:
        return False

    packet = packet[8:]
    infohash = packet[:20]
    if infohash != self_infohash:
        return False

    return True

def send_ext_handshake(the_socket):
    msg = chr(BT_MSG_ID) + chr(EXT_HANDSHAKE_ID) + bencode({"m":{"ut_metadata": 1}})
    send_message(the_socket, msg)

def request_metadata(the_socket, ut_metadata, piece):
    """bep_0009"""
    msg = chr(BT_MSG_ID) + chr(ut_metadata) + bencode({"msg_type": 0, "piece": piece})
    send_message(the_socket, msg)

def get_ut_metadata(data):
    # crude parse of the extension handshake: find the "ut_metadata" key
    # and read the digit after it (assumes the id is a single digit)
    ut_metadata = "_metadata"
    index = data.index(ut_metadata) + len(ut_metadata) + 1
    return int(data[index])

def get_metadata_size(data):
    metadata_size = "metadata_size"
    start = data.index(metadata_size) + len(metadata_size) + 1
    data = data[start:]
    return int(data[:data.index("e")])

def recvall(the_socket, timeout=5):
    the_socket.setblocking(0)
    total_data = []
    data = ""
    begin = time()

    while True:
        sleep(0.05)
        if total_data and time()-begin > timeout:
            break
        elif time()-begin > timeout*2:
            break
        try:
            data = the_socket.recv(1024)
            if data:
                total_data.append(data)
                begin = time()
        except Exception:
            pass
    return "".join(total_data)

def download_metadata(address, infohash, metadata_queue, timeout=5):
    metadata = None
    start_time = time()
    the_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        the_socket.settimeout(timeout)
        the_socket.connect(address)

        # handshake
        send_handshake(the_socket, infohash)
        packet = the_socket.recv(4096)

        # handshake error
        if not check_handshake(packet, infohash):
            return

        # ext handshake
        send_ext_handshake(the_socket)
        packet = the_socket.recv(4096)

        # get ut_metadata and metadata_size
        ut_metadata, metadata_size = get_ut_metadata(packet), get_metadata_size(packet)

        # request each piece of metadata
        metadata = []
        for piece in range(int(math.ceil(metadata_size/(16.0*1024)))):  # metadata is transferred in 16 KiB pieces
            request_metadata(the_socket, ut_metadata, piece)
            packet = recvall(the_socket, timeout)
            metadata.append(packet[packet.index("ee")+2:])
        metadata = "".join(metadata)

    except socket.timeout:
        pass
    except Exception, e:
        pass
    finally:
        the_socket.close()  # make sure the socket is closed every time
        if metadata != None:  # only enqueue non-empty metadata
            metadata_queue.put((infohash, address, metadata, start_time))
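
This module can also be exercised on its own. A minimal sketch; the peer address and the raw 20-byte infohash below are dummy placeholders, and a (infohash, address, metadata, start_time) tuple is only enqueued when the download succeeds:

from Queue import Queue

q = Queue()
# both arguments are dummy placeholders; use a live peer and a real infohash
download_metadata(('127.0.0.1', 6881), '\xaa' * 20, q)
if not q.empty():
    infohash, address, metadata, start_time = q.get()
    print infohash.encode('hex'), len(metadata), 'bytes of metadata'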

There is actually another way to download torrents: using libtorrent. It burns too much CPU, though, so I usually avoid it. Here it is anyway:

#coding: utf8
import threading
import traceback
import random
import time
import os
import socket

import libtorrent as lt

threading.stack_size(200*1024)
socket.setdefaulttimeout(30)

def fetch_torrent(session, ih, timeout):
    name = ih.upper()
    url = 'magnet:?xt=urn:btih:%s' % (name,)
    params = {
        'save_path': '/tmp/downloads/',
        'storage_mode': lt.storage_mode_t(2),
        'paused': False,
        'auto_managed': False,
        'duplicate_is_error': True}
    try:
        handle = lt.add_magnet_uri(session, url, params)
    except:
        return None
    status = session.status()
    handle.set_sequential_download(1)
    meta = None
    down_time = time.time()
    down_path = None
    for i in xrange(0, timeout):
        if handle.has_metadata():
            info = handle.get_torrent_info()
            down_path = '/tmp/downloads/%s' % info.name()
            #print 'status', 'p', status.num_peers, 'g', status.dht_global_nodes, 'ts', status.dht_torrents, 'u', status.total_upload, 'd', status.total_download
            meta = info.metadata()
            break
        time.sleep(1)
    if down_path and os.path.exists(down_path):
        os.system('rm -rf "%s"' % down_path)
    session.remove_torrent(handle)
    return meta

def download_metadata(address, binhash, metadata_queue, timeout=20):
    metadata = None
    start_time = time.time()
    try:
        session = lt.session()
        r = random.randrange(10000, 50000)
        session.listen_on(r, r+10)
        session.add_dht_router('router.bittorrent.com', 6881)
        session.add_dht_router('router.utorrent.com', 6881)
        session.add_dht_router('dht.transmissionbt.com', 6881)
        session.add_dht_router('127.0.0.1', 6881)
        session.start_dht()
        metadata = fetch_torrent(session, binhash.encode('hex'), timeout)
        session = None
    except:
        traceback.print_exc()
    finally:
        metadata_queue.put((binhash, address, metadata, start_time))
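
Since this fallback exposes the same download_metadata(address, binhash, metadata_queue) signature as the socket-based module above, swapping it in only means changing one import in startCrawler.py. A sketch, assuming you saved this file as downloadTorrentLt.py (that file name is my placeholder):

# in startCrawler.py, replace "import downloadTorrent" with:
import downloadTorrentLt as downloadTorrent  # module name is a placeholder

One behavioral difference worth noting: this version always enqueues a tuple, even when metadata is None, while the socket version only enqueues successes; got_torrent() tolerates both because it checks "if not data" before parsing.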

This crawler cost me, and several other experts around the web, quite a lot of time. If you have read this far, please keep up the spirit of research and open source, share generously, and talk to each other. I have set up a QQ group as the official group for 去转盘网; it is still small, so come hang out if you are interested. QQ group number: 512245829
