/**********************************************************************//**
Creates a new segment.
@return the block where the segment header is placed, x-latched, NULL
if could not create segment because of lack of space */
UNIV_INTERN
buf_block_t*
fseg_create_general(
/*================*/
	ulint	space,	/*!< in: space id */
	ulint	page,	/*!< in: page where the segment header is placed: if
			this is != 0, the page must belong to another segment,
			if this is 0, a new page will be allocated and it
			will belong to the created segment */
	ulint	byte_offset, /*!< in: byte offset of the created segment header
			on the page */
	ibool	has_done_reservation, /*!< in: TRUE if the caller has already
			done the reservation for the pages with
			fsp_reserve_free_extents (at least 2 extents: one for
			the inode and the other for the segment), in which
			case there is no need to do the check for this
			individual operation */
	mtr_t*	mtr)	/*!< in: mtr */
{
	ulint		flags;
	ulint		zip_size;
	fsp_header_t*	space_header;
	fseg_inode_t*	inode;	/* note: typedef byte fseg_inode_t */
	ib_id_t		seg_id;
	buf_block_t*	block	= 0; /* remove warning */
	fseg_header_t*	header	= 0; /* remove warning */
	rw_lock_t*	latch;
	ibool		success;
	ulint		n_reserved;
	ulint		i;

	ut_ad(mtr);
	ut_ad(byte_offset + FSEG_HEADER_SIZE
	      <= UNIV_PAGE_SIZE - FIL_PAGE_DATA_END);

	latch = fil_space_get_latch(space, &flags);

	zip_size = dict_table_flags_to_zip_size(flags);

	if (page != 0) {
		block = buf_page_get(space, zip_size, page, RW_X_LATCH, mtr);
		header = byte_offset + buf_block_get_frame(block);
	}

	ut_ad(!mutex_own(&kernel_mutex)
	      || mtr_memo_contains(mtr, latch, MTR_MEMO_X_LOCK));

	mtr_x_lock(latch, mtr);

	if (rw_lock_get_x_lock_count(latch) == 1) {
		/* This thread did not own the latch before this call: free
		excess pages from the insert buffer free list */

		if (space == IBUF_SPACE_ID) {
			ibuf_free_excess_pages();
		}
	}

	if (!has_done_reservation) {
		success = fsp_reserve_free_extents(&n_reserved, space, 2,
						   FSP_NORMAL, mtr);
		if (!success) {
			return(NULL);
		}
	}

	space_header = fsp_get_space_header(space, zip_size, mtr);

	/* Allocate an inode entry for the new segment, see
	fsp_alloc_seg_inode() */
	inode = fsp_alloc_seg_inode(space_header, mtr);

	if (inode == NULL) {
		goto funct_exit;
	}

	/* Read the next segment id from space header and increment the
	value in space header */

	seg_id = mach_read_from_8(space_header + FSP_SEG_ID);

	mlog_write_ull(space_header + FSP_SEG_ID, seg_id + 1, mtr);

	/* Offsets of the fields inside an inode entry (from fsp0fsp.h):
	#define FSEG_ID			0
	#define FSEG_NOT_FULL_N_USED	8
	#define FSEG_FREE		12
	#define FSEG_NOT_FULL		(12 + FLST_BASE_NODE_SIZE)
	#define FSEG_FULL		(12 + 2 * FLST_BASE_NODE_SIZE)
	#define FLST_BASE_NODE_SIZE	(4 + 2 * FIL_ADDR_SIZE)
	#define FIL_ADDR_SIZE		6 */

	mlog_write_ull(inode + FSEG_ID, seg_id, mtr);
	mlog_write_ulint(inode + FSEG_NOT_FULL_N_USED, 0, MLOG_4BYTES, mtr);

	/* Initialize the three page lists of the segment inside the inode */
	flst_init(inode + FSEG_FREE, mtr);
	flst_init(inode + FSEG_NOT_FULL, mtr);
	flst_init(inode + FSEG_FULL, mtr);

	mlog_write_ulint(inode + FSEG_MAGIC_N, FSEG_MAGIC_N_VALUE,
			 MLOG_4BYTES, mtr);

	/* #define FSEG_FRAG_ARR_N_SLOTS (FSP_EXTENT_SIZE / 2), i.e. 64 / 2 = 32 */
	for (i = 0; i < FSEG_FRAG_ARR_N_SLOTS; i++) {
		/* Mark every fragment page slot as unused */
		fseg_set_nth_frag_page_no(inode, i, FIL_NULL, mtr);
	}

	if (page == 0) {
		block = fseg_alloc_free_page_low(space, zip_size,
						 inode, 0, FSP_UP, mtr, mtr);

		if (block == NULL) {

			fsp_free_seg_inode(space, zip_size, inode, mtr);

			goto funct_exit;
		}

		ut_ad(rw_lock_get_x_lock_count(&block->lock) == 1);

		header = byte_offset + buf_block_get_frame(block);
		mlog_write_ulint(buf_block_get_frame(block) + FIL_PAGE_TYPE,
				 FIL_PAGE_TYPE_SYS, MLOG_2BYTES, mtr);
	}
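	/* Annotation (not part of the original function body): the three
	writes below fill in the segment header (FSEG_HEADER_SIZE = 10 bytes)
	that the caller embeds at byte_offset on the target page. Its layout
	in fsp0fsp.h is space id (4 bytes, FSEG_HDR_SPACE), page number of
	the inode page (4 bytes, FSEG_HDR_PAGE_NO) and byte offset of the
	inode within that page (2 bytes, FSEG_HDR_OFFSET), i.e. the segment
	header is simply a back-pointer to the inode entry created above. */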
	mlog_write_ulint(header + FSEG_HDR_OFFSET,
			 page_offset(inode), MLOG_2BYTES, mtr);

	mlog_write_ulint(header + FSEG_HDR_PAGE_NO,
			 page_get_page_no(page_align(inode)), MLOG_4BYTES, mtr);

	mlog_write_ulint(header + FSEG_HDR_SPACE, space, MLOG_4BYTES, mtr);

funct_exit:
	if (!has_done_reservation) {

		fil_space_release_free_extents(space, n_reserved);
	}

	return(block);
}
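For context, most callers do not invoke fseg_create_general() directly: fsp0fsp.c also provides a thin wrapper that passes has_done_reservation = FALSE, so the reservation of the two extents (inode page + segment) is done inside the call. The sketch below is reproduced from memory of the 5.5/5.6 source, so verify it against your tree:

UNIV_INTERN
buf_block_t*
fseg_create(
/*========*/
	ulint	space,		/*!< in: space id */
	ulint	page,		/*!< in: page where the segment header is
				placed; 0 means a new page is allocated */
	ulint	byte_offset,	/*!< in: byte offset of the segment header
				on the page */
	mtr_t*	mtr)		/*!< in: mtr */
{
	/* Let fseg_create_general() do the extent reservation itself */
	return(fseg_create_general(space, page, byte_offset, FALSE, mtr));
}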
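The offsets quoted in the inline comment block can be checked with simple arithmetic. Below is a minimal standalone sketch: the constants are re-declared by hand for illustration and assume the default 16 KiB page size with 64-page extents; it is not part of the server source.

#include <assert.h>

/* Constants copied locally for this sketch only (see fil0fil.h,
fut0lst.h and fsp0fsp.h in the server source). */
enum {
	FIL_ADDR_SIZE		= 6,			/* page number (4) + byte offset (2) */
	FLST_BASE_NODE_SIZE	= 4 + 2 * FIL_ADDR_SIZE,/* list length + first + last node = 16 */
	FSP_EXTENT_SIZE		= 64,			/* pages per extent with 16 KiB pages */
	FSEG_FRAG_ARR_N_SLOTS	= FSP_EXTENT_SIZE / 2	/* 32 fragment page slots */
};

int
main(void)
{
	/* Offsets of the inode fields written by fseg_create_general() */
	assert(FLST_BASE_NODE_SIZE == 16);
	assert(12 + FLST_BASE_NODE_SIZE == 28);		/* FSEG_NOT_FULL */
	assert(12 + 2 * FLST_BASE_NODE_SIZE == 44);	/* FSEG_FULL */
	assert(12 + 3 * FLST_BASE_NODE_SIZE == 60);	/* FSEG_MAGIC_N */

	/* Size of one inode entry: id (8) + not_full_n_used (4) + magic (4)
	+ three list base nodes + 32 four-byte fragment slots = 192 bytes */
	assert(16 + 3 * FLST_BASE_NODE_SIZE
	       + FSEG_FRAG_ARR_N_SLOTS * 4 == 192);

	return(0);
}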