/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.procedure.flush;

import java.io.IOException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ThreadPoolExecutor;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.MetricsMaster;
import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
import org.apache.hadoop.hbase.procedure.Procedure;
import org.apache.hadoop.hbase.procedure.ProcedureCoordinator;
import org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs;
import org.apache.hadoop.hbase.procedure.ZKProcedureCoordinator;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.AccessChecker;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;

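/**
 * Master-side manager for the distributed flush-table procedure. It coordinates a flush of
 * the target table across all region servers hosting its online regions, using the
 * ZooKeeper-based Distributed Procedure framework.
 * <p>
 * A client typically triggers this procedure through {@code Admin.execProcedure} with the
 * {@link #FLUSH_TABLE_PROCEDURE_SIGNATURE} signature and the table name as the instance.
 * A minimal sketch (connection setup and table name are illustrative only):
 * <pre>{@code
 * try (Connection conn = ConnectionFactory.createConnection(conf);
 *      Admin admin = conn.getAdmin()) {
 *   admin.execProcedure(MasterFlushTableProcedureManager.FLUSH_TABLE_PROCEDURE_SIGNATURE,
 *       "my_table", new HashMap<String, String>());
 * }
 * }</pre>
 */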
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class MasterFlushTableProcedureManager extends MasterProcedureManager {

  public static final String FLUSH_TABLE_PROCEDURE_SIGNATURE = "flush-table-proc";

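  // Knobs passed to the ProcedureCoordinator: the procedure is failed if it does not complete
  // within the timeout, and the wake interval controls how often the coordinator checks for
  // progress or errors while waiting (both in milliseconds).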
  private static final String FLUSH_TIMEOUT_MILLIS_KEY = "hbase.flush.master.timeoutMillis";
  private static final int FLUSH_TIMEOUT_MILLIS_DEFAULT = 60000;
  private static final String FLUSH_WAKE_MILLIS_KEY = "hbase.flush.master.wakeMillis";
  private static final int FLUSH_WAKE_MILLIS_DEFAULT = 500;

  private static final String FLUSH_PROC_POOL_THREADS_KEY =
      "hbase.flush.procedure.master.threads";
  private static final int FLUSH_PROC_POOL_THREADS_DEFAULT = 1;

  private static final Logger LOG =
      LoggerFactory.getLogger(MasterFlushTableProcedureManager.class);

  private MasterServices master;
  private ProcedureCoordinator coordinator;
  private Map<TableName, Procedure> procMap = new HashMap<>();
  private boolean stopped;

  public MasterFlushTableProcedureManager() {}

  @Override
  public void stop(String why) {
    LOG.info("stop: " + why);
    this.stopped = true;
  }

  @Override
  public boolean isStopped() {
    return this.stopped;
  }

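  /**
   * Set up the {@link ProcedureCoordinator} used to drive flush procedures. The timeout, wake
   * frequency and coordinator pool size are read from the master configuration (the
   * {@code hbase.flush.*} keys declared above), and a ZooKeeper-based RPC implementation is
   * used to communicate with the region server members.
   */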
  @Override
  public void initialize(MasterServices master, MetricsMaster metricsMaster)
      throws KeeperException, IOException, UnsupportedOperationException {
    this.master = master;

    // get the configuration for the coordinator
    Configuration conf = master.getConfiguration();
    long wakeFrequency = conf.getInt(FLUSH_WAKE_MILLIS_KEY, FLUSH_WAKE_MILLIS_DEFAULT);
    long timeoutMillis = conf.getLong(FLUSH_TIMEOUT_MILLIS_KEY, FLUSH_TIMEOUT_MILLIS_DEFAULT);
    int threads = conf.getInt(FLUSH_PROC_POOL_THREADS_KEY, FLUSH_PROC_POOL_THREADS_DEFAULT);

    // setup the procedure coordinator
    String name = master.getServerName().toString();
    ThreadPoolExecutor tpool = ProcedureCoordinator.defaultPool(name, threads);
    ProcedureCoordinatorRpcs comms = new ZKProcedureCoordinator(
        master.getZooKeeper(), getProcedureSignature(), name);

    this.coordinator = new ProcedureCoordinator(comms, tpool, timeoutMillis, wakeFrequency);
  }

  @Override
  public String getProcedureSignature() {
    return FLUSH_TABLE_PROCEDURE_SIGNATURE;
  }

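  /**
   * Execute the flush-table procedure. The procedure instance name carries the table name; an
   * optional column family may be supplied through the procedure description configuration
   * under {@link HConstants#FAMILY_KEY_STR}. This method invokes the {@code preTableFlush}
   * coprocessor hook, computes the set of region servers hosting the table's online regions,
   * starts the distributed procedure against them, and blocks until it completes or fails.
   */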
  @Override
  public void execProcedure(ProcedureDescription desc) throws IOException {

    TableName tableName = TableName.valueOf(desc.getInstance());

    // call pre coproc hook
    MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost();
    if (cpHost != null) {
      cpHost.preTableFlush(tableName);
    }

    // Get the list of region servers that host the online regions for the table.
    // We use the procedure instance name to carry the table name from the client.
    // It is possible that regions may move after we get the region server list.
    // Each region server will get its own online regions for the table.
    // We may still miss regions that need to be flushed.
    List<Pair<RegionInfo, ServerName>> regionsAndLocations;

    if (TableName.META_TABLE_NAME.equals(tableName)) {
      regionsAndLocations = MetaTableLocator.getMetaRegionsAndLocations(
        master.getZooKeeper());
    } else {
      regionsAndLocations = MetaTableAccessor.getTableRegionsAndLocations(
        master.getConnection(), tableName, false);
    }

    Set<String> regionServers = new HashSet<>(regionsAndLocations.size());
    for (Pair<RegionInfo, ServerName> region : regionsAndLocations) {
      if (region != null && region.getFirst() != null && region.getSecond() != null) {
        RegionInfo hri = region.getFirst();
        if (hri.isOffline() && (hri.isSplit() || hri.isSplitParent())) {
          continue;
        }
        regionServers.add(region.getSecond().toString());
      }
    }

    ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher(desc.getInstance());

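    // If the client supplied a column family in the procedure description configuration,
    // pass it along to the region servers as the procedure argument.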
    HBaseProtos.NameStringPair family = null;
    for (HBaseProtos.NameStringPair nsp : desc.getConfigurationList()) {
      if (HConstants.FAMILY_KEY_STR.equals(nsp.getName())) {
        family = nsp;
      }
    }
    byte[] procArgs = family != null ? family.toByteArray() : new byte[0];

    // Kick off the global procedure from the master coordinator to the region servers.
    // We rely on the existing Distributed Procedure framework to prevent any concurrent
    // procedure with the same name.
    Procedure proc = coordinator.startProcedure(monitor, desc.getInstance(),
      procArgs, Lists.newArrayList(regionServers));
    monitor.rethrowException();
    if (proc == null) {
      String msg = "Failed to submit distributed procedure " + desc.getSignature() + " for '"
          + desc.getInstance() + "'. Another flush procedure may already be running.";
      LOG.error(msg);
      throw new IOException(msg);
    }

    procMap.put(tableName, proc);

    try {
      // wait for the procedure to complete.  A timer thread is kicked off that should cancel this
      // if it takes too long.
      proc.waitForCompleted();
      LOG.info("Done waiting - exec procedure " + desc.getSignature() + " for '"
          + desc.getInstance() + "'");
      LOG.info("Master flush table procedure is successful!");
    } catch (InterruptedException e) {
      ForeignException ee =
          new ForeignException("Interrupted while waiting for flush table procedure to finish", e);
      monitor.receive(ee);
      Thread.currentThread().interrupt();
    } catch (ForeignException e) {
      ForeignException ee =
          new ForeignException("Exception while waiting for flush table procedure to finish", e);
      monitor.receive(ee);
    }
    monitor.rethrowException();
  }

  @Override
  public void checkPermissions(ProcedureDescription desc, AccessChecker accessChecker, User user)
      throws IOException {
    // Done by the AccessController as part of the preTableFlush coprocessor hook
    // (legacy code path). In the future, when the AccessController is removed for good,
    // that check should be moved here.
  }

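  /**
   * Report whether the flush procedure for the table named in {@code desc} has completed.
   * Returns {@code false} if no such procedure has been submitted on this master yet.
   */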
  @Override
  public synchronized boolean isProcedureDone(ProcedureDescription desc) throws IOException {
    // Procedure instance name is the table name.
    TableName tableName = TableName.valueOf(desc.getInstance());
    Procedure proc = procMap.get(tableName);
    if (proc == null) {
      // The procedure has not been started yet.
      // The client first requests the procedure and then polls isProcedureDone();
      // HBaseAdmin.execProcedure() wraps both calls.
      return false;
    }
    // We rely on the existing Distributed Procedure framework to give us the status.
    return proc.isCompleted();
  }

}