/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.storefiletracker;

import java.io.IOException;
import java.util.Collection;
import java.util.List;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.regionserver.CreateStoreFileWriterParams;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
import org.apache.yetus.audience.InterfaceAudience;

/**
 * An interface to define how we track the store files for a given store.
 * <p/>
 * Historically, we wrote store files to a tmp directory first and then renamed them into the
 * actual data directory. Once a store file was under the data directory, we considered it
 * 'committed', and we had to do a listing when loading store files.
 * <p/>
 * In the cloud age, we want to keep store files on object storage, where rename and list are not
 * as cheap as on HDFS, especially rename. Although introducing a metadata management layer for
 * object storage could solve the problem, we still want HBase to run on pure object storage, so
 * this interface abstracts how we track the store files. The old implementation persists nothing
 * and does a listing to load store files. When running on object storage, we could persist the
 * store file list in a system region, or in a file on the object storage, which makes it possible
 * to write directly into the data directory to avoid renaming, and also to avoid listing when
 * loading store files.
 * <p/>
 * Implementations must be thread safe, as a flush and a compaction may occur at the same time, and
 * multiple compactions may also run concurrently. As an implementation may choose to persist the
 * store file list to external storage, which could be slow, it is the callers' duty not to invoke
 * these methods while holding a lock that may block normal read/write requests.
 */
@InterfaceAudience.Private
public interface StoreFileTracker {
  /**
   * Load the store files list when opening a region.
   */
  List<StoreFileInfo> load() throws IOException;

  /**
   * Add new store files.
   * <p/>
   * Used for flush and bulk load.
   */
  void add(Collection<StoreFileInfo> newFiles) throws IOException;

  /**
   * Add new store files and remove compacted store files after compaction.
   */
  void replace(Collection<StoreFileInfo> compactedFiles, Collection<StoreFileInfo> newFiles)
    throws IOException;

  /**
   * Set the store files.
   */
  void set(List<StoreFileInfo> files) throws IOException;

  /**
   * Create a writer for writing new store files.
   * @return Writer for a new StoreFile
   */
  StoreFileWriter createWriter(CreateStoreFileWriterParams params) throws IOException;

  /**
   * Adds StoreFileTracker implementation-specific configurations into the table descriptor.
   * <p/>
   * This is used to avoid accidental data loss when changing the cluster level store file tracker
   * implementation, and also to catch possible misconfiguration between master and region servers.
   * <p/>
   * See HBASE-26246 for more details.
   * @param builder The table descriptor builder for the given table.
   */
  TableDescriptorBuilder updateWithTrackerConfigs(TableDescriptorBuilder builder);

  /**
   * Whether the implementation of this tracker requires you to write to a temp directory first,
   * i.e., does not allow broken store files under the actual data directory.
   */
  boolean requireWritingToTmpDirFirst();

  /**
   * Creates a reference file for the given {@link Reference} at the given path.
   */
  Reference createReference(Reference reference, Path path) throws IOException;

  /**
   * Reads the reference file from the given path.
   * @param path the {@link Path} to the reference file in the file system.
   * @return a {@link Reference} that points at the top/bottom half of an hfile
   */
  Reference readReference(Path path) throws IOException;

  /**
   * Returns true if the store tracked by this tracker has reference files.
   * @return true if the family contains reference files
   */
  boolean hasReferences() throws IOException;

  /**
   * Creates a {@link StoreFileInfo} from the given {@link FileStatus} and path.
   */
  StoreFileInfo getStoreFileInfo(final FileStatus fileStatus, final Path initialPath,
    final boolean primaryReplica) throws IOException;

  /**
   * Creates a {@link StoreFileInfo} from the given path.
   */
  StoreFileInfo getStoreFileInfo(final Path initialPath, final boolean primaryReplica)
    throws IOException;

}
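
// ---------------------------------------------------------------------------
// The sketch below is NOT part of HBase; it is a minimal, hypothetical example
// added only to illustrate how a caller such as a flush path might drive a
// StoreFileTracker as described in the Javadoc above: obtain the writer from
// the tracker (so the implementation decides whether to go through a tmp
// directory or write straight into the data directory), close the writer, then
// commit the new file through add(). The class and method names here are made
// up, and the flow is inferred from the interface contract, not copied from
// HBase's actual flush code.
// ---------------------------------------------------------------------------
class StoreFileTrackerUsageSketch {

  static void flushAndCommit(StoreFileTracker tracker, CreateStoreFileWriterParams params)
    throws IOException {
    // Let the tracker create the writer; trackers that do not require writing to
    // a tmp directory first may place the file directly under the data directory.
    StoreFileWriter writer = tracker.createWriter(params);
    try {
      // ... append the flushed cells to the writer here ...
    } finally {
      writer.close();
    }
    // Resolve the StoreFileInfo for the new file and commit it. The tracker is
    // responsible for persisting the updated file list (e.g. in a meta file or a
    // system region), so this call may be slow and, per the Javadoc above, must
    // not be made while holding a lock that blocks normal read/write requests.
    StoreFileInfo committed = tracker.getStoreFileInfo(writer.getPath(), true);
    tracker.add(java.util.Collections.singletonList(committed));
  }
}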