001/**
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018package org.apache.hadoop.hbase.regionserver;
019
020import java.io.FileNotFoundException;
021import java.io.IOException;
022import java.io.InterruptedIOException;
023import java.lang.reflect.InvocationTargetException;
024import java.net.BindException;
025import java.net.InetSocketAddress;
026import java.net.UnknownHostException;
027import java.nio.ByteBuffer;
028import java.util.ArrayList;
029import java.util.Arrays;
030import java.util.Collections;
031import java.util.HashMap;
032import java.util.Iterator;
033import java.util.List;
034import java.util.Map;
035import java.util.Map.Entry;
036import java.util.NavigableMap;
037import java.util.Set;
038import java.util.TreeSet;
039import java.util.concurrent.ConcurrentHashMap;
040import java.util.concurrent.ConcurrentMap;
041import java.util.concurrent.TimeUnit;
042import java.util.concurrent.atomic.AtomicBoolean;
043import java.util.concurrent.atomic.AtomicLong;
044import java.util.concurrent.atomic.LongAdder;
045import org.apache.commons.lang3.mutable.MutableObject;
046import org.apache.hadoop.conf.Configuration;
047import org.apache.hadoop.fs.Path;
048import org.apache.hadoop.hbase.ByteBufferExtendedCell;
049import org.apache.hadoop.hbase.CacheEvictionStats;
050import org.apache.hadoop.hbase.CacheEvictionStatsBuilder;
051import org.apache.hadoop.hbase.Cell;
052import org.apache.hadoop.hbase.CellScannable;
053import org.apache.hadoop.hbase.CellScanner;
054import org.apache.hadoop.hbase.CellUtil;
055import org.apache.hadoop.hbase.CompareOperator;
056import org.apache.hadoop.hbase.DoNotRetryIOException;
057import org.apache.hadoop.hbase.DroppedSnapshotException;
058import org.apache.hadoop.hbase.HBaseIOException;
059import org.apache.hadoop.hbase.HConstants;
060import org.apache.hadoop.hbase.MultiActionResultTooLarge;
061import org.apache.hadoop.hbase.NotServingRegionException;
062import org.apache.hadoop.hbase.PrivateCellUtil;
063import org.apache.hadoop.hbase.RegionTooBusyException;
064import org.apache.hadoop.hbase.Server;
065import org.apache.hadoop.hbase.ServerName;
066import org.apache.hadoop.hbase.TableName;
067import org.apache.hadoop.hbase.UnknownScannerException;
068import org.apache.hadoop.hbase.client.Append;
069import org.apache.hadoop.hbase.client.ConnectionUtils;
070import org.apache.hadoop.hbase.client.Delete;
071import org.apache.hadoop.hbase.client.Durability;
072import org.apache.hadoop.hbase.client.Get;
073import org.apache.hadoop.hbase.client.Increment;
074import org.apache.hadoop.hbase.client.Mutation;
075import org.apache.hadoop.hbase.client.Put;
076import org.apache.hadoop.hbase.client.RegionInfo;
077import org.apache.hadoop.hbase.client.RegionReplicaUtil;
078import org.apache.hadoop.hbase.client.Result;
079import org.apache.hadoop.hbase.client.Row;
080import org.apache.hadoop.hbase.client.RowMutations;
081import org.apache.hadoop.hbase.client.Scan;
082import org.apache.hadoop.hbase.client.TableDescriptor;
083import org.apache.hadoop.hbase.client.VersionInfoUtil;
084import org.apache.hadoop.hbase.conf.ConfigurationObserver;
085import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
086import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException;
087import org.apache.hadoop.hbase.exceptions.ScannerResetException;
088import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
089import org.apache.hadoop.hbase.filter.ByteArrayComparable;
090import org.apache.hadoop.hbase.io.TimeRange;
091import org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler;
092import org.apache.hadoop.hbase.ipc.HBaseRpcController;
093import org.apache.hadoop.hbase.ipc.PriorityFunction;
094import org.apache.hadoop.hbase.ipc.QosPriority;
095import org.apache.hadoop.hbase.ipc.RpcCallContext;
096import org.apache.hadoop.hbase.ipc.RpcCallback;
097import org.apache.hadoop.hbase.ipc.RpcScheduler;
098import org.apache.hadoop.hbase.ipc.RpcServer;
099import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
100import org.apache.hadoop.hbase.ipc.RpcServerFactory;
101import org.apache.hadoop.hbase.ipc.RpcServerInterface;
102import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
103import org.apache.hadoop.hbase.ipc.ServerRpcController;
104import org.apache.hadoop.hbase.log.HBaseMarkers;
105import org.apache.hadoop.hbase.master.MasterRpcServices;
106import org.apache.hadoop.hbase.net.Address;
107import org.apache.hadoop.hbase.procedure2.RSProcedureCallable;
108import org.apache.hadoop.hbase.quotas.ActivePolicyEnforcement;
109import org.apache.hadoop.hbase.quotas.OperationQuota;
110import org.apache.hadoop.hbase.quotas.QuotaUtil;
111import org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager;
112import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
113import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
114import org.apache.hadoop.hbase.quotas.SpaceViolationPolicyEnforcement;
115import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl;
116import org.apache.hadoop.hbase.regionserver.Leases.Lease;
117import org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException;
118import org.apache.hadoop.hbase.regionserver.Region.Operation;
119import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
120import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
121import org.apache.hadoop.hbase.regionserver.handler.OpenMetaHandler;
122import org.apache.hadoop.hbase.regionserver.handler.OpenPriorityRegionHandler;
123import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler;
124import org.apache.hadoop.hbase.security.Superusers;
125import org.apache.hadoop.hbase.security.User;
126import org.apache.hadoop.hbase.security.access.AccessChecker;
127import org.apache.hadoop.hbase.security.access.Permission;
128import org.apache.hadoop.hbase.util.Bytes;
129import org.apache.hadoop.hbase.util.DNS;
130import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
131import org.apache.hadoop.hbase.util.Pair;
132import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
133import org.apache.hadoop.hbase.util.Strings;
134import org.apache.hadoop.hbase.wal.WAL;
135import org.apache.hadoop.hbase.wal.WALEdit;
136import org.apache.hadoop.hbase.wal.WALKey;
137import org.apache.hadoop.hbase.wal.WALSplitter;
138import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
139import org.apache.yetus.audience.InterfaceAudience;
140import org.slf4j.Logger;
141import org.slf4j.LoggerFactory;
142
143import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
144import org.apache.hbase.thirdparty.com.google.common.cache.Cache;
145import org.apache.hbase.thirdparty.com.google.common.cache.CacheBuilder;
146import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
147import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
148import org.apache.hbase.thirdparty.com.google.protobuf.Message;
149import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
150import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
151import org.apache.hbase.thirdparty.com.google.protobuf.TextFormat;
152import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
153import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils;
154
155import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
156import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
157import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
158import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
159import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
160import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
161import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest;
162import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse;
163import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
164import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
165import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
166import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
167import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest;
168import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse;
169import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
170import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
171import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
172import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
173import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
174import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
175import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
176import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
177import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest;
178import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse;
179import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest;
180import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileResponse;
181import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest;
182import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo;
183import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse;
184import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.RegionOpeningState;
185import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RemoteProcedureRequest;
186import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest;
187import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse;
188import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
189import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
190import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
191import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
192import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
193import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
194import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest;
195import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse;
196import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry;
197import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionRequest;
198import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionResponse;
199import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
200import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Action;
201import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
202import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPath;
203import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileResponse;
204import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest;
205import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadResponse;
206import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
207import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Condition;
208import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
209import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
210import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetRequest;
211import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetResponse;
212import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRegionLoadStats;
213import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRequest;
214import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiResponse;
215import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateRequest;
216import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateResponse;
217import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto;
218import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
219import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest;
220import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadResponse;
221import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction;
222import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionActionResult;
223import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ResultOrException;
224import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest;
225import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse;
226import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
227import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad;
228import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair;
229import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameInt64Pair;
230import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
231import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
232import org.apache.hadoop.hbase.shaded.protobuf.generated.MapReduceProtos.ScanMetrics;
233import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsRequest;
234import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse;
235import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse.TableQuotaSnapshot;
236import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
237import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor;
238import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
239import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor;
240import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor;
241
242/**
243 * Implements the regionserver RPC services.
244 */
245@InterfaceAudience.Private
246@SuppressWarnings("deprecation")
247public class RSRpcServices implements HBaseRPCErrorHandler,
248    AdminService.BlockingInterface, ClientService.BlockingInterface, PriorityFunction,
249    ConfigurationObserver {
  // Class logger; protected, so it is also visible to subclasses.
  protected static final Logger LOG = LoggerFactory.getLogger(RSRpcServices.class);

  /** RPC scheduler to use for the region server. */
  public static final String REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS =
    "hbase.region.server.rpc.scheduler.factory.class";

  /** RPC scheduler to use for the master. */
  public static final String MASTER_RPC_SCHEDULER_FACTORY_CLASS =
    "hbase.master.rpc.scheduler.factory.class";

  /**
   * Minimum allowable time limit delta (in milliseconds) that can be enforced during scans. This
   * configuration exists to prevent the scenario where a time limit is specified to be so
   * restrictive that the time limit is reached immediately (before any cells are scanned).
   */
  private static final String REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA =
      "hbase.region.server.rpc.minimum.scan.time.limit.delta";
  /**
   * Default value of {@link RSRpcServices#REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA}
   */
  private static final long DEFAULT_REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA = 10;

  /**
   * Number of rows in a batch operation above which a warning will be logged.
   */
  static final String BATCH_ROWS_THRESHOLD_NAME = "hbase.rpc.rows.warning.threshold";
  /**
   * Default value of {@link RSRpcServices#BATCH_ROWS_THRESHOLD_NAME}
   */
  static final int BATCH_ROWS_THRESHOLD_DEFAULT = 5000;

  // Config key toggling the RPC server's byte buffer reservoir; the value is consumed by the
  // RpcServer machinery, not read directly in this class's visible code.
  protected static final String RESERVOIR_ENABLED_KEY = "hbase.ipc.server.reservoir.enabled";

  // Request counter. (Includes requests that are not serviced by regions.)
  // Count only once for requests with multiple actions like multi/caching-scan/replayBatch
  final LongAdder requestCount = new LongAdder();

  // Request counter for rpc get
  final LongAdder rpcGetRequestCount = new LongAdder();

  // Request counter for rpc scan
  final LongAdder rpcScanRequestCount = new LongAdder();

  // Request counter for rpc multi
  final LongAdder rpcMultiRequestCount = new LongAdder();

  // Request counter for rpc mutate
  final LongAdder rpcMutateRequestCount = new LongAdder();

  // Server to handle client requests.
  final RpcServerInterface rpcServer;
  // Socket address (host/port) of this server's RPC endpoint.
  // NOTE(review): initialized in the constructor, outside this chunk — confirm there.
  final InetSocketAddress isa;

  // The region server that owns and backs these RPC services.
  private final HRegionServer regionServer;
  // Upper bound on the cumulative result size returned by a single scan RPC.
  // Presumably config-derived; set in the constructor, not visible in this chunk.
  private final long maxScannerResultSize;

  // The reference to the priority extraction function
  private final PriorityFunction priority;

  // Generates ids for region scanners. Not final — presumably assigned during
  // startup rather than construction; TODO confirm at the assignment site.
  private ScannerIdGenerator scannerIdGenerator;
  // All currently open region scanners, keyed by scanner name.
  private final ConcurrentMap<String, RegionScannerHolder> scanners = new ConcurrentHashMap<>();
  // Hold the name of a closed scanner for a while. This is used to keep compatible for old clients
  // which may send next or close request to a region scanner which has already been exhausted. The
  // entries will be removed automatically after scannerLeaseTimeoutPeriod.
  private final Cache<String, String> closedScanners;
  /**
   * The lease timeout period for client scanners (milliseconds).
   */
  private final int scannerLeaseTimeoutPeriod;

  /**
   * The RPC timeout period (milliseconds)
   */
  private final int rpcTimeout;

  /**
   * The minimum allowable delta to use for the scan limit
   */
  private final long minimumScanTimeLimitDelta;

  /**
   * Row size threshold for multi requests above which a warning is logged
   */
  private final int rowSizeWarnThreshold;

  // Set while a clear-compaction-queues request is in flight — presumably used to
  // reject concurrent clear requests; confirm at the usage site.
  final AtomicBoolean clearCompactionQueues = new AtomicBoolean(false);

  // We want to vet all accesses at the point of entry itself; limiting scope of access checker
  // instance to only this class to prevent its use from spreading deeper into implementation.
  // Initialized in start() since AccessChecker needs ZKWatcher which is created by HRegionServer
  // after RSRpcServices constructor and before start() is called.
  // Initialized only if authorization is enabled, else remains null.
  protected AccessChecker accessChecker;

  /**
   * Services launched in RSRpcServices. By default they are on but you can use the below
   * booleans to selectively enable/disable either Admin or Client Service (Rare is the case
   * where you would ever turn off one or the other).
   */
  public static final String REGIONSERVER_ADMIN_SERVICE_CONFIG =
      "hbase.regionserver.admin.executorService";
  public static final String REGIONSERVER_CLIENT_SERVICE_CONFIG =
      "hbase.regionserver.client.executorService";
353
354  /**
355   * An Rpc callback for closing a RegionScanner.
356   */
357  private static final class RegionScannerCloseCallBack implements RpcCallback {
358
359    private final RegionScanner scanner;
360
361    public RegionScannerCloseCallBack(RegionScanner scanner) {
362      this.scanner = scanner;
363    }
364
365    @Override
366    public void run() throws IOException {
367      this.scanner.close();
368    }
369  }
370
371  /**
372   * An Rpc callback for doing shipped() call on a RegionScanner.
373   */
374  private class RegionScannerShippedCallBack implements RpcCallback {
375
376    private final String scannerName;
377    private final Shipper shipper;
378    private final Lease lease;
379
380    public RegionScannerShippedCallBack(String scannerName, Shipper shipper, Lease lease) {
381      this.scannerName = scannerName;
382      this.shipper = shipper;
383      this.lease = lease;
384    }
385
386    @Override
387    public void run() throws IOException {
388      this.shipper.shipped();
389      // We're done. On way out re-add the above removed lease. The lease was temp removed for this
390      // Rpc call and we are at end of the call now. Time to add it back.
391      if (scanners.containsKey(scannerName)) {
392        if (lease != null) regionServer.leases.addLease(lease);
393      }
394    }
395  }
396
397  /**
398   * An RpcCallBack that creates a list of scanners that needs to perform callBack operation on
399   * completion of multiGets.
400   */
401   static class RegionScannersCloseCallBack implements RpcCallback {
402    private final List<RegionScanner> scanners = new ArrayList<>();
403
404    public void addScanner(RegionScanner scanner) {
405      this.scanners.add(scanner);
406    }
407
408    @Override
409    public void run() {
410      for (RegionScanner scanner : scanners) {
411        try {
412          scanner.close();
413        } catch (IOException e) {
414          LOG.error("Exception while closing the scanner " + scanner, e);
415        }
416      }
417    }
418  }
419
420  /**
421   * Holder class which holds the RegionScanner, nextCallSeq and RpcCallbacks together.
422   */
423  private static final class RegionScannerHolder {
424
425    private final AtomicLong nextCallSeq = new AtomicLong(0);
426    private final String scannerName;
427    private final RegionScanner s;
428    private final HRegion r;
429    private final RpcCallback closeCallBack;
430    private final RpcCallback shippedCallback;
431    private byte[] rowOfLastPartialResult;
432    private boolean needCursor;
433
434    public RegionScannerHolder(String scannerName, RegionScanner s, HRegion r,
435        RpcCallback closeCallBack, RpcCallback shippedCallback, boolean needCursor) {
436      this.scannerName = scannerName;
437      this.s = s;
438      this.r = r;
439      this.closeCallBack = closeCallBack;
440      this.shippedCallback = shippedCallback;
441      this.needCursor = needCursor;
442    }
443
444    public long getNextCallSeq() {
445      return nextCallSeq.get();
446    }
447
448    public boolean incNextCallSeq(long currentSeq) {
449      // Use CAS to prevent multiple scan request running on the same scanner.
450      return nextCallSeq.compareAndSet(currentSeq, currentSeq + 1);
451    }
452  }
453
454  /**
455   * Instantiated as a scanner lease. If the lease times out, the scanner is
456   * closed
457   */
458  private class ScannerListener implements LeaseListener {
459    private final String scannerName;
460
461    ScannerListener(final String n) {
462      this.scannerName = n;
463    }
464
465    @Override
466    public void leaseExpired() {
467      RegionScannerHolder rsh = scanners.remove(this.scannerName);
468      if (rsh != null) {
469        RegionScanner s = rsh.s;
470        LOG.info("Scanner " + this.scannerName + " lease expired on region "
471          + s.getRegionInfo().getRegionNameAsString());
472        HRegion region = null;
473        try {
474          region = regionServer.getRegion(s.getRegionInfo().getRegionName());
475          if (region != null && region.getCoprocessorHost() != null) {
476            region.getCoprocessorHost().preScannerClose(s);
477          }
478        } catch (IOException e) {
479          LOG.error("Closing scanner for " + s.getRegionInfo().getRegionNameAsString(), e);
480        } finally {
481          try {
482            s.close();
483            if (region != null && region.getCoprocessorHost() != null) {
484              region.getCoprocessorHost().postScannerClose(s);
485            }
486          } catch (IOException e) {
487            LOG.error("Closing scanner for " + s.getRegionInfo().getRegionNameAsString(), e);
488          }
489        }
490      } else {
491        LOG.warn("Scanner " + this.scannerName + " lease expired, but no related" +
492          " scanner found, hence no chance to close that related scanner!");
493      }
494    }
495  }
496
497  private static ResultOrException getResultOrException(final ClientProtos.Result r,
498                                                        final int index){
499    return getResultOrException(ResponseConverter.buildActionResult(r), index);
500  }
501
502  private static ResultOrException getResultOrException(final Exception e, final int index) {
503    return getResultOrException(ResponseConverter.buildActionResult(e), index);
504  }
505
506  private static ResultOrException getResultOrException(
507      final ResultOrException.Builder builder, final int index) {
508    return builder.setIndex(index).build();
509  }
510
511  /**
512   * Checks for the following pre-checks in order:
513   * <ol>
514   *   <li>RegionServer is running</li>
515   *   <li>If authorization is enabled, then RPC caller has ADMIN permissions</li>
516   * </ol>
517   * @param requestName name of rpc request. Used in reporting failures to provide context.
518   * @throws ServiceException If any of the above listed pre-check fails.
519   */
520  private void rpcPreCheck(String requestName) throws ServiceException {
521    try {
522      checkOpen();
523      requirePermission(requestName, Permission.Action.ADMIN);
524    } catch (IOException ioe) {
525      throw new ServiceException(ioe);
526    }
527  }
528
529  /**
530   * Starts the nonce operation for a mutation, if needed.
531   * @param mutation Mutation.
532   * @param nonceGroup Nonce group from the request.
533   * @returns whether to proceed this mutation.
534   */
535  private boolean startNonceOperation(final MutationProto mutation, long nonceGroup)
536      throws IOException {
537    if (regionServer.nonceManager == null || !mutation.hasNonce()) return true;
538    boolean canProceed = false;
539    try {
540      canProceed = regionServer.nonceManager.startOperation(
541        nonceGroup, mutation.getNonce(), regionServer);
542    } catch (InterruptedException ex) {
543      throw new InterruptedIOException("Nonce start operation interrupted");
544    }
545    return canProceed;
546  }
547
548  /**
549   * Ends nonce operation for a mutation, if needed.
550   * @param mutation Mutation.
551   * @param nonceGroup Nonce group from the request. Always 0 in initial implementation.
552   * @param success Whether the operation for this nonce has succeeded.
553   */
554  private void endNonceOperation(final MutationProto mutation,
555      long nonceGroup, boolean success) {
556    if (regionServer.nonceManager != null && mutation.hasNonce()) {
557      regionServer.nonceManager.endOperation(nonceGroup, mutation.getNonce(), success);
558    }
559  }
560
561  private boolean isClientCellBlockSupport(RpcCallContext context) {
562    return context != null && context.isClientCellBlockSupported();
563  }
564
565  private void addResult(final MutateResponse.Builder builder, final Result result,
566      final HBaseRpcController rpcc, boolean clientCellBlockSupported) {
567    if (result == null) return;
568    if (clientCellBlockSupported) {
569      builder.setResult(ProtobufUtil.toResultNoData(result));
570      rpcc.setCellScanner(result.cellScanner());
571    } else {
572      ClientProtos.Result pbr = ProtobufUtil.toResult(result);
573      builder.setResult(pbr);
574    }
575  }
576
577  private void addResults(ScanResponse.Builder builder, List<Result> results,
578      HBaseRpcController controller, boolean isDefaultRegion, boolean clientCellBlockSupported) {
579    builder.setStale(!isDefaultRegion);
580    if (results.isEmpty()) {
581      return;
582    }
583    if (clientCellBlockSupported) {
584      for (Result res : results) {
585        builder.addCellsPerResult(res.size());
586        builder.addPartialFlagPerResult(res.mayHaveMoreCellsInRow());
587      }
588      controller.setCellScanner(CellUtil.createCellScanner(results));
589    } else {
590      for (Result res : results) {
591        ClientProtos.Result pbr = ProtobufUtil.toResult(res);
592        builder.addResults(pbr);
593      }
594    }
595  }
596
597  /**
598   * Mutate a list of rows atomically.
599   * @param cellScanner if non-null, the mutation data -- the Cell content.
600   */
601  private boolean checkAndRowMutate(final HRegion region, final List<ClientProtos.Action> actions,
602    final CellScanner cellScanner, byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
603    ByteArrayComparable comparator, TimeRange timeRange, RegionActionResult.Builder builder,
604    ActivePolicyEnforcement spaceQuotaEnforcement) throws IOException {
605    int countOfCompleteMutation = 0;
606    try {
607      if (!region.getRegionInfo().isMetaRegion()) {
608        regionServer.cacheFlusher.reclaimMemStoreMemory();
609      }
610      RowMutations rm = null;
611      int i = 0;
612      ClientProtos.ResultOrException.Builder resultOrExceptionOrBuilder =
613        ClientProtos.ResultOrException.newBuilder();
614      for (ClientProtos.Action action: actions) {
615        if (action.hasGet()) {
616          throw new DoNotRetryIOException("Atomic put and/or delete only, not a Get=" +
617            action.getGet());
618        }
619        MutationType type = action.getMutation().getMutateType();
620        if (rm == null) {
621          rm = new RowMutations(action.getMutation().getRow().toByteArray(), actions.size());
622        }
623        switch (type) {
624          case PUT:
625            Put put = ProtobufUtil.toPut(action.getMutation(), cellScanner);
626            ++countOfCompleteMutation;
627            checkCellSizeLimit(region, put);
628            spaceQuotaEnforcement.getPolicyEnforcement(region).check(put);
629            rm.add(put);
630            break;
631          case DELETE:
632            Delete del = ProtobufUtil.toDelete(action.getMutation(), cellScanner);
633            ++countOfCompleteMutation;
634            spaceQuotaEnforcement.getPolicyEnforcement(region).check(del);
635            rm.add(del);
636            break;
637          default:
638            throw new DoNotRetryIOException("Atomic put and/or delete only, not " + type.name());
639        }
640        // To unify the response format with doNonAtomicRegionMutation and read through client's
641        // AsyncProcess we have to add an empty result instance per operation
642        resultOrExceptionOrBuilder.clear();
643        resultOrExceptionOrBuilder.setIndex(i++);
644        builder.addResultOrException(
645          resultOrExceptionOrBuilder.build());
646      }
647      return region.checkAndRowMutate(row, family, qualifier, op, comparator, timeRange, rm);
648    } finally {
649      // Currently, the checkAndMutate isn't supported by batch so it won't mess up the cell scanner
650      // even if the malformed cells are not skipped.
651      for (int i = countOfCompleteMutation; i < actions.size(); ++i) {
652        skipCellsForMutation(actions.get(i), cellScanner);
653      }
654    }
655  }
656
657  /**
658   * Execute an append mutation.
659   *
660   * @return result to return to client if default operation should be
661   * bypassed as indicated by RegionObserver, null otherwise
662   */
663  private Result append(final HRegion region, final OperationQuota quota,
664      final MutationProto mutation, final CellScanner cellScanner, long nonceGroup,
665      ActivePolicyEnforcement spaceQuota)
666      throws IOException {
667    long before = EnvironmentEdgeManager.currentTime();
668    Append append = ProtobufUtil.toAppend(mutation, cellScanner);
669    checkCellSizeLimit(region, append);
670    spaceQuota.getPolicyEnforcement(region).check(append);
671    quota.addMutation(append);
672    Result r = null;
673    if (region.getCoprocessorHost() != null) {
674      r = region.getCoprocessorHost().preAppend(append);
675    }
676    if (r == null) {
677      boolean canProceed = startNonceOperation(mutation, nonceGroup);
678      boolean success = false;
679      try {
680        long nonce = mutation.hasNonce() ? mutation.getNonce() : HConstants.NO_NONCE;
681        if (canProceed) {
682          r = region.append(append, nonceGroup, nonce);
683        } else {
684          // convert duplicate append to get
685          List<Cell> results = region.get(ProtobufUtil.toGet(mutation, cellScanner), false,
686              nonceGroup, nonce);
687          r = Result.create(results);
688        }
689        success = true;
690      } finally {
691        if (canProceed) {
692          endNonceOperation(mutation, nonceGroup, success);
693        }
694      }
695      if (region.getCoprocessorHost() != null) {
696        r = region.getCoprocessorHost().postAppend(append, r);
697      }
698    }
699    if (regionServer.metricsRegionServer != null) {
700      regionServer.metricsRegionServer.updateAppend(
701          region.getTableDescriptor().getTableName(),
702        EnvironmentEdgeManager.currentTime() - before);
703    }
704    return r == null ? Result.EMPTY_RESULT : r;
705  }
706
707  /**
708   * Execute an increment mutation.
709   *
710   * @param region
711   * @param mutation
712   * @return the Result
713   * @throws IOException
714   */
715  private Result increment(final HRegion region, final OperationQuota quota,
716      final MutationProto mutation, final CellScanner cells, long nonceGroup,
717      ActivePolicyEnforcement spaceQuota)
718      throws IOException {
719    long before = EnvironmentEdgeManager.currentTime();
720    Increment increment = ProtobufUtil.toIncrement(mutation, cells);
721    checkCellSizeLimit(region, increment);
722    spaceQuota.getPolicyEnforcement(region).check(increment);
723    quota.addMutation(increment);
724    Result r = null;
725    if (region.getCoprocessorHost() != null) {
726      r = region.getCoprocessorHost().preIncrement(increment);
727    }
728    if (r == null) {
729      boolean canProceed = startNonceOperation(mutation, nonceGroup);
730      boolean success = false;
731      try {
732        long nonce = mutation.hasNonce() ? mutation.getNonce() : HConstants.NO_NONCE;
733        if (canProceed) {
734          r = region.increment(increment, nonceGroup, nonce);
735        } else {
736          // convert duplicate increment to get
737          List<Cell> results = region.get(ProtobufUtil.toGet(mutation, cells), false, nonceGroup,
738              nonce);
739          r = Result.create(results);
740        }
741        success = true;
742      } finally {
743        if (canProceed) {
744          endNonceOperation(mutation, nonceGroup, success);
745        }
746      }
747      if (region.getCoprocessorHost() != null) {
748        r = region.getCoprocessorHost().postIncrement(increment, r);
749      }
750    }
751    if (regionServer.metricsRegionServer != null) {
752      regionServer.metricsRegionServer.updateIncrement(
753          region.getTableDescriptor().getTableName(),
754          EnvironmentEdgeManager.currentTime() - before);
755    }
756    return r == null ? Result.EMPTY_RESULT : r;
757  }
758
759  /**
760   * Run through the regionMutation <code>rm</code> and per Mutation, do the work, and then when
761   * done, add an instance of a {@link ResultOrException} that corresponds to each Mutation.
762   * @param cellsToReturn  Could be null. May be allocated in this method.  This is what this
763   * method returns as a 'result'.
764   * @param closeCallBack the callback to be used with multigets
765   * @param context the current RpcCallContext
766   * @return Return the <code>cellScanner</code> passed
767   */
768  private List<CellScannable> doNonAtomicRegionMutation(final HRegion region,
769      final OperationQuota quota, final RegionAction actions, final CellScanner cellScanner,
770      final RegionActionResult.Builder builder, List<CellScannable> cellsToReturn, long nonceGroup,
771      final RegionScannersCloseCallBack closeCallBack, RpcCallContext context,
772      ActivePolicyEnforcement spaceQuotaEnforcement) {
773    // Gather up CONTIGUOUS Puts and Deletes in this mutations List.  Idea is that rather than do
774    // one at a time, we instead pass them in batch.  Be aware that the corresponding
775    // ResultOrException instance that matches each Put or Delete is then added down in the
776    // doNonAtomicBatchOp call.  We should be staying aligned though the Put and Delete are
777    // deferred/batched
778    List<ClientProtos.Action> mutations = null;
779    long maxQuotaResultSize = Math.min(maxScannerResultSize, quota.getReadAvailable());
780    IOException sizeIOE = null;
781    Object lastBlock = null;
782    ClientProtos.ResultOrException.Builder resultOrExceptionBuilder = ResultOrException.newBuilder();
783    boolean hasResultOrException = false;
784    for (ClientProtos.Action action : actions.getActionList()) {
785      hasResultOrException = false;
786      resultOrExceptionBuilder.clear();
787      try {
788        Result r = null;
789
790        if (context != null
791            && context.isRetryImmediatelySupported()
792            && (context.getResponseCellSize() > maxQuotaResultSize
793              || context.getResponseBlockSize() + context.getResponseExceptionSize()
794              > maxQuotaResultSize)) {
795
796          // We're storing the exception since the exception and reason string won't
797          // change after the response size limit is reached.
798          if (sizeIOE == null ) {
799            // We don't need the stack un-winding do don't throw the exception.
800            // Throwing will kill the JVM's JIT.
801            //
802            // Instead just create the exception and then store it.
803            sizeIOE = new MultiActionResultTooLarge("Max size exceeded"
804                + " CellSize: " + context.getResponseCellSize()
805                + " BlockSize: " + context.getResponseBlockSize());
806
807            // Only report the exception once since there's only one request that
808            // caused the exception. Otherwise this number will dominate the exceptions count.
809            rpcServer.getMetrics().exception(sizeIOE);
810          }
811
812          // Now that there's an exception is known to be created
813          // use it for the response.
814          //
815          // This will create a copy in the builder.
816          NameBytesPair pair = ResponseConverter.buildException(sizeIOE);
817          resultOrExceptionBuilder.setException(pair);
818          context.incrementResponseExceptionSize(pair.getSerializedSize());
819          resultOrExceptionBuilder.setIndex(action.getIndex());
820          builder.addResultOrException(resultOrExceptionBuilder.build());
821          skipCellsForMutation(action, cellScanner);
822          continue;
823        }
824        if (action.hasGet()) {
825          long before = EnvironmentEdgeManager.currentTime();
826          ClientProtos.Get pbGet = action.getGet();
827          // An asynchbase client, https://github.com/OpenTSDB/asynchbase, starts by trying to do
828          // a get closest before. Throwing the UnknownProtocolException signals it that it needs
829          // to switch and do hbase2 protocol (HBase servers do not tell clients what versions
830          // they are; its a problem for non-native clients like asynchbase. HBASE-20225.
831          if (pbGet.hasClosestRowBefore() && pbGet.getClosestRowBefore()) {
832            throw new UnknownProtocolException("Is this a pre-hbase-1.0.0 or asynchbase client? " +
833                "Client is invoking getClosestRowBefore removed in hbase-2.0.0 replaced by " +
834                "reverse Scan.");
835          }
836          try {
837            Get get = ProtobufUtil.toGet(pbGet);
838            if (context != null) {
839              r = get(get, (region), closeCallBack, context);
840            } else {
841              r = region.get(get);
842            }
843          } finally {
844            if (regionServer.metricsRegionServer != null) {
845              regionServer.metricsRegionServer.updateGet(
846                  region.getTableDescriptor().getTableName(),
847                  EnvironmentEdgeManager.currentTime() - before);
848            }
849          }
850        } else if (action.hasServiceCall()) {
851          hasResultOrException = true;
852          com.google.protobuf.Message result =
853            execServiceOnRegion(region, action.getServiceCall());
854          ClientProtos.CoprocessorServiceResult.Builder serviceResultBuilder =
855            ClientProtos.CoprocessorServiceResult.newBuilder();
856          resultOrExceptionBuilder.setServiceResult(
857            serviceResultBuilder.setValue(
858              serviceResultBuilder.getValueBuilder()
859                .setName(result.getClass().getName())
860                // TODO: Copy!!!
861                .setValue(UnsafeByteOperations.unsafeWrap(result.toByteArray()))));
862        } else if (action.hasMutation()) {
863          MutationType type = action.getMutation().getMutateType();
864          if (type != MutationType.PUT && type != MutationType.DELETE && mutations != null &&
865              !mutations.isEmpty()) {
866            // Flush out any Puts or Deletes already collected.
867            doNonAtomicBatchOp(builder, region, quota, mutations, cellScanner,
868              spaceQuotaEnforcement);
869            mutations.clear();
870          }
871          switch (type) {
872            case APPEND:
873              r = append(region, quota, action.getMutation(), cellScanner, nonceGroup,
874                  spaceQuotaEnforcement);
875              break;
876            case INCREMENT:
877              r = increment(region, quota, action.getMutation(), cellScanner, nonceGroup,
878                  spaceQuotaEnforcement);
879              break;
880            case PUT:
881            case DELETE:
882              // Collect the individual mutations and apply in a batch
883              if (mutations == null) {
884                mutations = new ArrayList<>(actions.getActionCount());
885              }
886              mutations.add(action);
887              break;
888            default:
889              throw new DoNotRetryIOException("Unsupported mutate type: " + type.name());
890          }
891        } else {
892          throw new HBaseIOException("Unexpected Action type");
893        }
894        if (r != null) {
895          ClientProtos.Result pbResult = null;
896          if (isClientCellBlockSupport(context)) {
897            pbResult = ProtobufUtil.toResultNoData(r);
898            //  Hard to guess the size here.  Just make a rough guess.
899            if (cellsToReturn == null) {
900              cellsToReturn = new ArrayList<>();
901            }
902            cellsToReturn.add(r);
903          } else {
904            pbResult = ProtobufUtil.toResult(r);
905          }
906          lastBlock = addSize(context, r, lastBlock);
907          hasResultOrException = true;
908          resultOrExceptionBuilder.setResult(pbResult);
909        }
910        // Could get to here and there was no result and no exception.  Presumes we added
911        // a Put or Delete to the collecting Mutations List for adding later.  In this
912        // case the corresponding ResultOrException instance for the Put or Delete will be added
913        // down in the doNonAtomicBatchOp method call rather than up here.
914      } catch (IOException ie) {
915        rpcServer.getMetrics().exception(ie);
916        hasResultOrException = true;
917        NameBytesPair pair = ResponseConverter.buildException(ie);
918        resultOrExceptionBuilder.setException(pair);
919        context.incrementResponseExceptionSize(pair.getSerializedSize());
920      }
921      if (hasResultOrException) {
922        // Propagate index.
923        resultOrExceptionBuilder.setIndex(action.getIndex());
924        builder.addResultOrException(resultOrExceptionBuilder.build());
925      }
926    }
927    // Finish up any outstanding mutations
928    if (!CollectionUtils.isEmpty(mutations)) {
929      doNonAtomicBatchOp(builder, region, quota, mutations, cellScanner, spaceQuotaEnforcement);
930    }
931    return cellsToReturn;
932  }
933
934  private void checkCellSizeLimit(final HRegion r, final Mutation m) throws IOException {
935    if (r.maxCellSize > 0) {
936      CellScanner cells = m.cellScanner();
937      while (cells.advance()) {
938        int size = PrivateCellUtil.estimatedSerializedSizeOf(cells.current());
939        if (size > r.maxCellSize) {
940          String msg = "Cell with size " + size + " exceeds limit of " + r.maxCellSize + " bytes";
941          if (LOG.isDebugEnabled()) {
942            LOG.debug(msg);
943          }
944          throw new DoNotRetryIOException(msg);
945        }
946      }
947    }
948  }
949
950  private void doAtomicBatchOp(final RegionActionResult.Builder builder, final HRegion region,
951    final OperationQuota quota, final List<ClientProtos.Action> mutations,
952    final CellScanner cells, ActivePolicyEnforcement spaceQuotaEnforcement)
953    throws IOException {
954    // Just throw the exception. The exception will be caught and then added to region-level
955    // exception for RegionAction. Leaving the null to action result is ok since the null
956    // result is viewed as failure by hbase client. And the region-lever exception will be used
957    // to replaced the null result. see AsyncRequestFutureImpl#receiveMultiAction and
958    // AsyncBatchRpcRetryingCaller#onComplete for more details.
959    doBatchOp(builder, region, quota, mutations, cells, spaceQuotaEnforcement, true);
960  }
961
962  private void doNonAtomicBatchOp(final RegionActionResult.Builder builder, final HRegion region,
963    final OperationQuota quota, final List<ClientProtos.Action> mutations,
964    final CellScanner cells, ActivePolicyEnforcement spaceQuotaEnforcement) {
965    try {
966      doBatchOp(builder, region, quota, mutations, cells, spaceQuotaEnforcement, false);
967    } catch (IOException e) {
968      // Set the exception for each action. The mutations in same RegionAction are group to
969      // different batch and then be processed individually. Hence, we don't set the region-level
970      // exception here for whole RegionAction.
971      for (Action mutation : mutations) {
972        builder.addResultOrException(getResultOrException(e, mutation.getIndex()));
973      }
974    }
975  }
976
977  /**
978   * Execute a list of Put/Delete mutations.
979   *
980   * @param builder
981   * @param region
982   * @param mutations
983   */
984  private void doBatchOp(final RegionActionResult.Builder builder, final HRegion region,
985      final OperationQuota quota, final List<ClientProtos.Action> mutations,
986      final CellScanner cells, ActivePolicyEnforcement spaceQuotaEnforcement, boolean atomic)
987      throws IOException {
988    Mutation[] mArray = new Mutation[mutations.size()];
989    long before = EnvironmentEdgeManager.currentTime();
990    boolean batchContainsPuts = false, batchContainsDelete = false;
991    try {
992      /** HBASE-17924
993       * mutationActionMap is a map to map the relation between mutations and actions
994       * since mutation array may have been reoredered.In order to return the right
995       * result or exception to the corresponding actions, We need to know which action
996       * is the mutation belong to. We can't sort ClientProtos.Action array, since they
997       * are bonded to cellscanners.
998       */
999      Map<Mutation, ClientProtos.Action> mutationActionMap = new HashMap<>();
1000      int i = 0;
1001      for (ClientProtos.Action action: mutations) {
1002        if (action.hasGet()) {
1003          throw new DoNotRetryIOException("Atomic put and/or delete only, not a Get=" +
1004            action.getGet());
1005        }
1006        MutationProto m = action.getMutation();
1007        Mutation mutation;
1008        if (m.getMutateType() == MutationType.PUT) {
1009          mutation = ProtobufUtil.toPut(m, cells);
1010          batchContainsPuts = true;
1011        } else {
1012          mutation = ProtobufUtil.toDelete(m, cells);
1013          batchContainsDelete = true;
1014        }
1015        mutationActionMap.put(mutation, action);
1016        mArray[i++] = mutation;
1017        checkCellSizeLimit(region, mutation);
1018        // Check if a space quota disallows this mutation
1019        spaceQuotaEnforcement.getPolicyEnforcement(region).check(mutation);
1020        quota.addMutation(mutation);
1021      }
1022
1023      if (!region.getRegionInfo().isMetaRegion()) {
1024        regionServer.cacheFlusher.reclaimMemStoreMemory();
1025      }
1026
1027      // HBASE-17924
1028      // Sort to improve lock efficiency for non-atomic batch of operations. If atomic
1029      // order is preserved as its expected from the client
1030      if (!atomic) {
1031        Arrays.sort(mArray, (v1, v2) -> Row.COMPARATOR.compare(v1, v2));
1032      }
1033
1034      OperationStatus[] codes = region.batchMutate(mArray, atomic, HConstants.NO_NONCE,
1035        HConstants.NO_NONCE);
1036      for (i = 0; i < codes.length; i++) {
1037        Mutation currentMutation = mArray[i];
1038        ClientProtos.Action currentAction = mutationActionMap.get(currentMutation);
1039        int index = currentAction.hasIndex() || !atomic ? currentAction.getIndex() : i;
1040        Exception e = null;
1041        switch (codes[i].getOperationStatusCode()) {
1042          case BAD_FAMILY:
1043            e = new NoSuchColumnFamilyException(codes[i].getExceptionMsg());
1044            builder.addResultOrException(getResultOrException(e, index));
1045            break;
1046
1047          case SANITY_CHECK_FAILURE:
1048            e = new FailedSanityCheckException(codes[i].getExceptionMsg());
1049            builder.addResultOrException(getResultOrException(e, index));
1050            break;
1051
1052          default:
1053            e = new DoNotRetryIOException(codes[i].getExceptionMsg());
1054            builder.addResultOrException(getResultOrException(e, index));
1055            break;
1056
1057          case SUCCESS:
1058            builder.addResultOrException(getResultOrException(
1059              ClientProtos.Result.getDefaultInstance(), index));
1060            break;
1061
1062          case STORE_TOO_BUSY:
1063            e = new RegionTooBusyException(codes[i].getExceptionMsg());
1064            builder.addResultOrException(getResultOrException(e, index));
1065            break;
1066        }
1067      }
1068    } finally {
1069      int processedMutationIndex = 0;
1070      for (Action mutation : mutations) {
1071        // The non-null mArray[i] means the cell scanner has been read.
1072        if (mArray[processedMutationIndex++] == null) {
1073          skipCellsForMutation(mutation, cells);
1074        }
1075      }
1076      updateMutationMetrics(region, before, batchContainsPuts, batchContainsDelete);
1077    }
1078  }
1079
1080  private void updateMutationMetrics(HRegion region, long starttime, boolean batchContainsPuts,
1081    boolean batchContainsDelete) {
1082    if (regionServer.metricsRegionServer != null) {
1083      long after = EnvironmentEdgeManager.currentTime();
1084      if (batchContainsPuts) {
1085        regionServer.metricsRegionServer
1086          .updatePutBatch(region.getTableDescriptor().getTableName(), after - starttime);
1087      }
1088      if (batchContainsDelete) {
1089        regionServer.metricsRegionServer
1090          .updateDeleteBatch(region.getTableDescriptor().getTableName(), after - starttime);
1091      }
1092    }
1093  }
1094
1095  /**
1096   * Execute a list of Put/Delete mutations. The function returns OperationStatus instead of
1097   * constructing MultiResponse to save a possible loop if caller doesn't need MultiResponse.
1098   * @param region
1099   * @param mutations
1100   * @param replaySeqId
1101   * @return an array of OperationStatus which internally contains the OperationStatusCode and the
1102   *         exceptionMessage if any
1103   * @throws IOException
1104   */
1105  private OperationStatus [] doReplayBatchOp(final HRegion region,
1106      final List<WALSplitter.MutationReplay> mutations, long replaySeqId) throws IOException {
1107    long before = EnvironmentEdgeManager.currentTime();
1108    boolean batchContainsPuts = false, batchContainsDelete = false;
1109    try {
1110      for (Iterator<WALSplitter.MutationReplay> it = mutations.iterator(); it.hasNext();) {
1111        WALSplitter.MutationReplay m = it.next();
1112
1113        if (m.type == MutationType.PUT) {
1114          batchContainsPuts = true;
1115        } else {
1116          batchContainsDelete = true;
1117        }
1118
1119        NavigableMap<byte[], List<Cell>> map = m.mutation.getFamilyCellMap();
1120        List<Cell> metaCells = map.get(WALEdit.METAFAMILY);
1121        if (metaCells != null && !metaCells.isEmpty()) {
1122          for (Cell metaCell : metaCells) {
1123            CompactionDescriptor compactionDesc = WALEdit.getCompaction(metaCell);
1124            boolean isDefaultReplica = RegionReplicaUtil.isDefaultReplica(region.getRegionInfo());
1125            HRegion hRegion = region;
1126            if (compactionDesc != null) {
1127              // replay the compaction. Remove the files from stores only if we are the primary
1128              // region replica (thus own the files)
1129              hRegion.replayWALCompactionMarker(compactionDesc, !isDefaultReplica, isDefaultReplica,
1130                replaySeqId);
1131              continue;
1132            }
1133            FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(metaCell);
1134            if (flushDesc != null && !isDefaultReplica) {
1135              hRegion.replayWALFlushMarker(flushDesc, replaySeqId);
1136              continue;
1137            }
1138            RegionEventDescriptor regionEvent = WALEdit.getRegionEventDescriptor(metaCell);
1139            if (regionEvent != null && !isDefaultReplica) {
1140              hRegion.replayWALRegionEventMarker(regionEvent);
1141              continue;
1142            }
1143            BulkLoadDescriptor bulkLoadEvent = WALEdit.getBulkLoadDescriptor(metaCell);
1144            if (bulkLoadEvent != null) {
1145              hRegion.replayWALBulkLoadEventMarker(bulkLoadEvent);
1146              continue;
1147            }
1148          }
1149          it.remove();
1150        }
1151      }
1152      requestCount.increment();
1153      if (!region.getRegionInfo().isMetaRegion()) {
1154        regionServer.cacheFlusher.reclaimMemStoreMemory();
1155      }
1156      return region.batchReplay(mutations.toArray(
1157        new WALSplitter.MutationReplay[mutations.size()]), replaySeqId);
1158    } finally {
1159      updateMutationMetrics(region, before, batchContainsPuts, batchContainsDelete);
1160    }
1161  }
1162
1163  private void closeAllScanners() {
1164    // Close any outstanding scanners. Means they'll get an UnknownScanner
1165    // exception next time they come in.
1166    for (Map.Entry<String, RegionScannerHolder> e : scanners.entrySet()) {
1167      try {
1168        e.getValue().s.close();
1169      } catch (IOException ioe) {
1170        LOG.warn("Closing scanner " + e.getKey(), ioe);
1171      }
1172    }
1173  }
1174
  // Exposed for testing
  interface LogDelegate {
    /**
     * Emits a warning for a multi request whose total row count exceeds the configured
     * threshold (HBASE-18023).
     */
    void logBatchWarning(String firstRegionName, int sum, int rowSizeWarnThreshold);
  }
1179
1180  private static LogDelegate DEFAULT_LOG_DELEGATE = new LogDelegate() {
1181    @Override
1182    public void logBatchWarning(String firstRegionName, int sum, int rowSizeWarnThreshold) {
1183      if (LOG.isWarnEnabled()) {
1184        LOG.warn("Large batch operation detected (greater than " + rowSizeWarnThreshold
1185            + ") (HBASE-18023)." + " Requested Number of Rows: " + sum + " Client: "
1186            + RpcServer.getRequestUserName().orElse(null) + "/"
1187            + RpcServer.getRemoteAddress().orElse(null)
1188            + " first region in multi=" + firstRegionName);
1189      }
1190    }
1191  };
1192
1193  private final LogDelegate ld;
1194
  /**
   * Constructs the RPC services for the given region server with the default warning-log
   * delegate.
   * @param rs the region server these RPC services front
   * @throws IOException if the RPC server cannot be created or bound
   */
  public RSRpcServices(HRegionServer rs) throws IOException {
    this(rs, DEFAULT_LOG_DELEGATE);
  }
1198
  /**
   * Constructs the RPC services with a caller-supplied {@link LogDelegate}.
   * Directly invoked only for testing.
   * @param rs the region server these RPC services front
   * @param ld delegate used to log large-batch warnings
   * @throws IOException if the RPC server cannot be created or the listener address resolved
   */
  RSRpcServices(HRegionServer rs, LogDelegate ld) throws IOException {
    this.ld = ld;
    regionServer = rs;
    rowSizeWarnThreshold = rs.conf.getInt(BATCH_ROWS_THRESHOLD_NAME, BATCH_ROWS_THRESHOLD_DEFAULT);
    // Instantiate the configured RPC scheduler factory reflectively; a missing or broken
    // factory class is a configuration error, hence IllegalArgumentException.
    RpcSchedulerFactory rpcSchedulerFactory;
    try {
      rpcSchedulerFactory = getRpcSchedulerFactoryClass().asSubclass(RpcSchedulerFactory.class)
          .getDeclaredConstructor().newInstance();
    } catch (NoSuchMethodException | InvocationTargetException |
        InstantiationException | IllegalAccessException e) {
      throw new IllegalArgumentException(e);
    }
    // Server to handle client requests.
    InetSocketAddress initialIsa;
    InetSocketAddress bindAddress;
    // Master and region server resolve their hostname/port from different config keys.
    if(this instanceof MasterRpcServices) {
      String hostname = getHostname(rs.conf, true);
      int port = rs.conf.getInt(HConstants.MASTER_PORT, HConstants.DEFAULT_MASTER_PORT);
      // Creation of a HSA will force a resolve.
      initialIsa = new InetSocketAddress(hostname, port);
      bindAddress = new InetSocketAddress(rs.conf.get("hbase.master.ipc.address", hostname), port);
    } else {
      String hostname = getHostname(rs.conf, false);
      int port = rs.conf.getInt(HConstants.REGIONSERVER_PORT,
        HConstants.DEFAULT_REGIONSERVER_PORT);
      // Creation of a HSA will force a resolve.
      initialIsa = new InetSocketAddress(hostname, port);
      bindAddress = new InetSocketAddress(
        rs.conf.get("hbase.regionserver.ipc.address", hostname), port);
    }
    if (initialIsa.getAddress() == null) {
      throw new IllegalArgumentException("Failed resolve of " + initialIsa);
    }
    priority = createPriority();
    // Using Address means we don't get the IP too. Shorten it more even to just the host name
    // w/o the domain.
    String name = rs.getProcessName() + "/" +
        Address.fromParts(initialIsa.getHostName(), initialIsa.getPort()).toStringWithoutDomain();
    // Set how many times to retry talking to another server over Connection.
    ConnectionUtils.setServerSideHConnectionRetriesConfig(rs.conf, name, LOG);
    rpcServer = createRpcServer(rs, rs.conf, rpcSchedulerFactory, bindAddress, name);
    rpcServer.setRsRpcServices(this);
    // Timeouts and size limits governing scanners and RPC handling.
    scannerLeaseTimeoutPeriod = rs.conf.getInt(
      HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD,
      HConstants.DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD);
    maxScannerResultSize = rs.conf.getLong(
      HConstants.HBASE_SERVER_SCANNER_MAX_RESULT_SIZE_KEY,
      HConstants.DEFAULT_HBASE_SERVER_SCANNER_MAX_RESULT_SIZE);
    rpcTimeout = rs.conf.getInt(
      HConstants.HBASE_RPC_TIMEOUT_KEY,
      HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
    minimumScanTimeLimitDelta = rs.conf.getLong(
      REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA,
      DEFAULT_REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA);

    InetSocketAddress address = rpcServer.getListenerAddress();
    if (address == null) {
      throw new IOException("Listener channel is closed");
    }
    // Set our address, however we need the final port that was given to rpcServer
    isa = new InetSocketAddress(initialIsa.getHostName(), address.getPort());
    rpcServer.setErrorHandler(this);
    rs.setName(name);

    // Recently-closed scanner ids are cached so late client calls can be answered gracefully.
    closedScanners = CacheBuilder.newBuilder()
        .expireAfterAccess(scannerLeaseTimeoutPeriod, TimeUnit.MILLISECONDS).build();
  }
1267
1268  protected RpcServerInterface createRpcServer(Server server, Configuration conf,
1269      RpcSchedulerFactory rpcSchedulerFactory, InetSocketAddress bindAddress, String name)
1270      throws IOException {
1271    boolean reservoirEnabled = conf.getBoolean(RESERVOIR_ENABLED_KEY, true);
1272    try {
1273      return RpcServerFactory.createRpcServer(server, name, getServices(),
1274          bindAddress, // use final bindAddress for this server.
1275          conf, rpcSchedulerFactory.create(conf, this, server), reservoirEnabled);
1276    } catch (BindException be) {
1277      throw new IOException(be.getMessage() + ". To switch ports use the '"
1278          + HConstants.REGIONSERVER_PORT + "' configuration property.",
1279          be.getCause() != null ? be.getCause() : be);
1280    }
1281  }
1282
1283  protected Class<?> getRpcSchedulerFactoryClass() {
1284    return this.regionServer.conf.getClass(REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
1285      SimpleRpcSchedulerFactory.class);
1286  }
1287
1288  @Override
1289  public void onConfigurationChange(Configuration newConf) {
1290    if (rpcServer instanceof ConfigurationObserver) {
1291      ((ConfigurationObserver)rpcServer).onConfigurationChange(newConf);
1292    }
1293  }
1294
  /**
   * Creates the priority function used to rank incoming RPCs; subclasses may override.
   */
  protected PriorityFunction createPriority() {
    return new AnnotationReadingPriorityFunction(this);
  }
1298
1299  protected void requirePermission(String request, Permission.Action perm) throws IOException {
1300    if (accessChecker != null) {
1301      accessChecker.requirePermission(RpcServer.getRequestUser().orElse(null), request, perm);
1302    }
1303  }
1304
1305
1306  public static String getHostname(Configuration conf, boolean isMaster)
1307      throws UnknownHostException {
1308    String hostname = conf.get(isMaster? HRegionServer.MASTER_HOSTNAME_KEY :
1309      HRegionServer.RS_HOSTNAME_KEY);
1310    if (hostname == null || hostname.isEmpty()) {
1311      String masterOrRS = isMaster ? "master" : "regionserver";
1312      return Strings.domainNamePointerToHostName(DNS.getDefaultHost(
1313        conf.get("hbase." + masterOrRS + ".dns.interface", "default"),
1314        conf.get("hbase." + masterOrRS + ".dns.nameserver", "default")));
1315    } else {
1316      LOG.info("hostname is configured to be " + hostname);
1317      return hostname;
1318    }
1319  }
1320
  /** @return the number of scanners currently open on this region server. */
  @VisibleForTesting
  public int getScannersCount() {
    return scanners.size();
  }
1325
1326  public
1327  RegionScanner getScanner(long scannerId) {
1328    String scannerIdString = Long.toString(scannerId);
1329    RegionScannerHolder scannerHolder = scanners.get(scannerIdString);
1330    if (scannerHolder != null) {
1331      return scannerHolder.s;
1332    }
1333    return null;
1334  }
1335
1336  public String getScanDetailsWithId(long scannerId) {
1337    RegionScanner scanner = getScanner(scannerId);
1338    if (scanner == null) {
1339      return null;
1340    }
1341    StringBuilder builder = new StringBuilder();
1342    builder.append("table: ").append(scanner.getRegionInfo().getTable().getNameAsString());
1343    builder.append(" region: ").append(scanner.getRegionInfo().getRegionNameAsString());
1344    return builder.toString();
1345  }
1346
1347  /**
1348   * Get the vtime associated with the scanner.
1349   * Currently the vtime is the number of "next" calls.
1350   */
1351  long getScannerVirtualTime(long scannerId) {
1352    String scannerIdString = Long.toString(scannerId);
1353    RegionScannerHolder scannerHolder = scanners.get(scannerIdString);
1354    if (scannerHolder != null) {
1355      return scannerHolder.getNextCallSeq();
1356    }
1357    return 0L;
1358  }
1359
1360  /**
1361   * Method to account for the size of retained cells and retained data blocks.
1362   * @return an object that represents the last referenced block from this response.
1363   */
1364  Object addSize(RpcCallContext context, Result r, Object lastBlock) {
1365    if (context != null && r != null && !r.isEmpty()) {
1366      for (Cell c : r.rawCells()) {
1367        context.incrementResponseCellSize(PrivateCellUtil.estimatedSerializedSizeOf(c));
1368
1369        // Since byte buffers can point all kinds of crazy places it's harder to keep track
1370        // of which blocks are kept alive by what byte buffer.
1371        // So we make a guess.
1372        if (c instanceof ByteBufferExtendedCell) {
1373          ByteBufferExtendedCell bbCell = (ByteBufferExtendedCell) c;
1374          ByteBuffer bb = bbCell.getValueByteBuffer();
1375          if (bb != lastBlock) {
1376            context.incrementResponseBlockSize(bb.capacity());
1377            lastBlock = bb;
1378          }
1379        } else {
1380          // We're using the last block being the same as the current block as
1381          // a proxy for pointing to a new block. This won't be exact.
1382          // If there are multiple gets that bounce back and forth
1383          // Then it's possible that this will over count the size of
1384          // referenced blocks. However it's better to over count and
1385          // use two rpcs than to OOME the regionserver.
1386          byte[] valueArray = c.getValueArray();
1387          if (valueArray != lastBlock) {
1388            context.incrementResponseBlockSize(valueArray.length);
1389            lastBlock = valueArray;
1390          }
1391        }
1392
1393      }
1394    }
1395    return lastBlock;
1396  }
1397
1398  private RegionScannerHolder addScanner(String scannerName, RegionScanner s, Shipper shipper,
1399      HRegion r, boolean needCursor) throws LeaseStillHeldException {
1400    Lease lease = regionServer.leases.createLease(scannerName, this.scannerLeaseTimeoutPeriod,
1401      new ScannerListener(scannerName));
1402    RpcCallback shippedCallback = new RegionScannerShippedCallBack(scannerName, shipper, lease);
1403    RpcCallback closeCallback;
1404    if (s instanceof RpcCallback) {
1405      closeCallback = (RpcCallback) s;
1406    } else {
1407      closeCallback = new RegionScannerCloseCallBack(s);
1408    }
1409    RegionScannerHolder rsh =
1410        new RegionScannerHolder(scannerName, s, r, closeCallback, shippedCallback, needCursor);
1411    RegionScannerHolder existing = scanners.putIfAbsent(scannerName, rsh);
1412    assert existing == null : "scannerId must be unique within regionserver's whole lifecycle! " +
1413      scannerName;
1414    return rsh;
1415  }
1416
1417  /**
1418   * Find the HRegion based on a region specifier
1419   *
1420   * @param regionSpecifier the region specifier
1421   * @return the corresponding region
1422   * @throws IOException if the specifier is not null,
1423   *    but failed to find the region
1424   */
1425  @VisibleForTesting
1426  public HRegion getRegion(
1427      final RegionSpecifier regionSpecifier) throws IOException {
1428    return regionServer.getRegion(regionSpecifier.getValue().toByteArray());
1429  }
1430
1431  /**
1432   * Find the List of HRegions based on a list of region specifiers
1433   *
1434   * @param regionSpecifiers the list of region specifiers
1435   * @return the corresponding list of regions
1436   * @throws IOException if any of the specifiers is not null,
1437   *    but failed to find the region
1438   */
1439  private List<HRegion> getRegions(final List<RegionSpecifier> regionSpecifiers,
1440      final CacheEvictionStatsBuilder stats) {
1441    List<HRegion> regions = Lists.newArrayListWithCapacity(regionSpecifiers.size());
1442    for (RegionSpecifier regionSpecifier: regionSpecifiers) {
1443      try {
1444        regions.add(regionServer.getRegion(regionSpecifier.getValue().toByteArray()));
1445      } catch (NotServingRegionException e) {
1446        stats.addException(regionSpecifier.getValue().toByteArray(), e);
1447      }
1448    }
1449    return regions;
1450  }
1451
  /** @return the PriorityFunction used to prioritize this server's RPC calls. */
  @VisibleForTesting
  public PriorityFunction getPriority() {
    return priority;
  }
1456
  /** @return the region server's live Configuration. */
  @VisibleForTesting
  public Configuration getConfiguration() {
    return regionServer.getConfiguration();
  }
1461
  /** @return the region server's RPC quota manager. */
  private RegionServerRpcQuotaManager getRpcQuotaManager() {
    return regionServer.getRegionServerRpcQuotaManager();
  }
1465
  /** @return the region server's space quota manager. */
  private RegionServerSpaceQuotaManager getSpaceQuotaManager() {
    return regionServer.getRegionServerSpaceQuotaManager();
  }
1469
1470  void start(ZKWatcher zkWatcher) {
1471    if (AccessChecker.isAuthorizationSupported(getConfiguration())) {
1472      accessChecker = new AccessChecker(getConfiguration(), zkWatcher);
1473    }
1474    this.scannerIdGenerator = new ScannerIdGenerator(this.regionServer.serverName);
1475    rpcServer.start();
1476  }
1477
  /**
   * Shut down RPC services: stop the access checker (if any), close all open
   * scanners, then stop the RPC server itself.
   */
  void stop() {
    if (accessChecker != null) {
      accessChecker.stop();
    }
    closeAllScanners();
    rpcServer.stop();
  }
1485
1486  /**
1487   * Called to verify that this server is up and running.
1488   */
1489  // TODO : Rename this and HMaster#checkInitialized to isRunning() (or a better name).
1490  protected void checkOpen() throws IOException {
1491    if (regionServer.isAborted()) {
1492      throw new RegionServerAbortedException("Server " + regionServer.serverName + " aborting");
1493    }
1494    if (regionServer.isStopped()) {
1495      throw new RegionServerStoppedException("Server " + regionServer.serverName + " stopping");
1496    }
1497    if (!regionServer.fsOk) {
1498      throw new RegionServerStoppedException("File system not available");
1499    }
1500    if (!regionServer.isOnline()) {
1501      throw new ServerNotRunningYetException("Server " + regionServer.serverName
1502          + " is not running yet");
1503    }
1504  }
1505
1506  /**
1507   * By default, put up an Admin and a Client Service.
1508   * Set booleans <code>hbase.regionserver.admin.executorService</code> and
1509   * <code>hbase.regionserver.client.executorService</code> if you want to enable/disable services.
1510   * Default is that both are enabled.
1511   * @return immutable list of blocking services and the security info classes that this server
1512   * supports
1513   */
1514  protected List<BlockingServiceAndInterface> getServices() {
1515    boolean admin =
1516      getConfiguration().getBoolean(REGIONSERVER_ADMIN_SERVICE_CONFIG, true);
1517    boolean client =
1518      getConfiguration().getBoolean(REGIONSERVER_CLIENT_SERVICE_CONFIG, true);
1519    List<BlockingServiceAndInterface> bssi = new ArrayList<>();
1520    if (client) {
1521      bssi.add(new BlockingServiceAndInterface(
1522      ClientService.newReflectiveBlockingService(this),
1523      ClientService.BlockingInterface.class));
1524    }
1525    if (admin) {
1526      bssi.add(new BlockingServiceAndInterface(
1527      AdminService.newReflectiveBlockingService(this),
1528      AdminService.BlockingInterface.class));
1529    }
1530    return new org.apache.hbase.thirdparty.com.google.common.collect.
1531        ImmutableList.Builder<BlockingServiceAndInterface>().addAll(bssi).build();
1532  }
1533
  /** @return the socket address this RPC server is bound to. */
  public InetSocketAddress getSocketAddress() {
    return isa;
  }
1537
  /**
   * Compute the scheduling priority of an incoming call by delegating to the
   * configured {@link PriorityFunction}.
   */
  @Override
  public int getPriority(RequestHeader header, Message param, User user) {
    return priority.getPriority(header, param, user);
  }
1542
  /**
   * Compute the deadline of an incoming call by delegating to the configured
   * {@link PriorityFunction}.
   */
  @Override
  public long getDeadline(RequestHeader header, Message param) {
    return priority.getDeadline(header, param);
  }
1547
1548  /*
1549   * Check if an OOME and, if so, abort immediately to avoid creating more objects.
1550   *
1551   * @param e
1552   *
1553   * @return True if we OOME'd and are aborting.
1554   */
1555  @Override
1556  public boolean checkOOME(final Throwable e) {
1557    return exitIfOOME(e);
1558  }
1559
1560  public static boolean exitIfOOME(final Throwable e ){
1561    boolean stop = false;
1562    try {
1563      if (e instanceof OutOfMemoryError
1564          || (e.getCause() != null && e.getCause() instanceof OutOfMemoryError)
1565          || (e.getMessage() != null && e.getMessage().contains(
1566              "java.lang.OutOfMemoryError"))) {
1567        stop = true;
1568        LOG.error(HBaseMarkers.FATAL, "Run out of memory; "
1569          + RSRpcServices.class.getSimpleName() + " will abort itself immediately",
1570          e);
1571      }
1572    } finally {
1573      if (stop) {
1574        Runtime.getRuntime().halt(1);
1575      }
1576    }
1577    return stop;
1578  }
1579
1580  /**
1581   * Close a region on the region server.
1582   *
1583   * @param controller the RPC controller
1584   * @param request the request
1585   * @throws ServiceException
1586   */
1587  @Override
1588  @QosPriority(priority=HConstants.ADMIN_QOS)
1589  public CloseRegionResponse closeRegion(final RpcController controller,
1590      final CloseRegionRequest request) throws ServiceException {
1591    final ServerName sn = (request.hasDestinationServer() ?
1592      ProtobufUtil.toServerName(request.getDestinationServer()) : null);
1593
1594    try {
1595      checkOpen();
1596      if (request.hasServerStartCode()) {
1597        // check that we are the same server that this RPC is intended for.
1598        long serverStartCode = request.getServerStartCode();
1599        if (regionServer.serverName.getStartcode() !=  serverStartCode) {
1600          throw new ServiceException(new DoNotRetryIOException("This RPC was intended for a " +
1601              "different server with startCode: " + serverStartCode + ", this server is: "
1602              + regionServer.serverName));
1603        }
1604      }
1605      final String encodedRegionName = ProtobufUtil.getRegionEncodedName(request.getRegion());
1606
1607      requestCount.increment();
1608      if (sn == null) {
1609        LOG.info("Close " + encodedRegionName + " without moving");
1610      } else {
1611        LOG.info("Close " + encodedRegionName + ", moving to " + sn);
1612      }
1613      boolean closed = regionServer.closeRegion(encodedRegionName, false, sn);
1614      CloseRegionResponse.Builder builder = CloseRegionResponse.newBuilder().setClosed(closed);
1615      return builder.build();
1616    } catch (IOException ie) {
1617      throw new ServiceException(ie);
1618    }
1619  }
1620
1621  /**
1622   * Compact a region on the region server.
1623   *
1624   * @param controller the RPC controller
1625   * @param request the request
1626   * @throws ServiceException
1627   */
1628  @Override
1629  @QosPriority(priority = HConstants.ADMIN_QOS)
1630  public CompactRegionResponse compactRegion(final RpcController controller,
1631      final CompactRegionRequest request) throws ServiceException {
1632    try {
1633      checkOpen();
1634      requestCount.increment();
1635      HRegion region = getRegion(request.getRegion());
1636      // Quota support is enabled, the requesting user is not system/super user
1637      // and a quota policy is enforced that disables compactions.
1638      if (QuotaUtil.isQuotaEnabled(getConfiguration()) &&
1639          !Superusers.isSuperUser(RpcServer.getRequestUser().orElse(null)) &&
1640          this.regionServer.getRegionServerSpaceQuotaManager()
1641              .areCompactionsDisabled(region.getTableDescriptor().getTableName())) {
1642        throw new DoNotRetryIOException(
1643            "Compactions on this region are " + "disabled due to a space quota violation.");
1644      }
1645      region.startRegionOperation(Operation.COMPACT_REGION);
1646      LOG.info("Compacting " + region.getRegionInfo().getRegionNameAsString());
1647      boolean major = request.hasMajor() && request.getMajor();
1648      if (request.hasFamily()) {
1649        byte[] family = request.getFamily().toByteArray();
1650        String log = "User-triggered " + (major ? "major " : "") + "compaction for region " +
1651            region.getRegionInfo().getRegionNameAsString() + " and family " +
1652            Bytes.toString(family);
1653        LOG.trace(log);
1654        region.requestCompaction(family, log, Store.PRIORITY_USER, major,
1655          CompactionLifeCycleTracker.DUMMY);
1656      } else {
1657        String log = "User-triggered " + (major ? "major " : "") + "compaction for region " +
1658            region.getRegionInfo().getRegionNameAsString();
1659        LOG.trace(log);
1660        region.requestCompaction(log, Store.PRIORITY_USER, major, CompactionLifeCycleTracker.DUMMY);
1661      }
1662      return CompactRegionResponse.newBuilder().build();
1663    } catch (IOException ie) {
1664      throw new ServiceException(ie);
1665    }
1666  }
1667
1668  /**
1669   * Flush a region on the region server.
1670   *
1671   * @param controller the RPC controller
1672   * @param request the request
1673   * @throws ServiceException
1674   */
1675  @Override
1676  @QosPriority(priority=HConstants.ADMIN_QOS)
1677  public FlushRegionResponse flushRegion(final RpcController controller,
1678      final FlushRegionRequest request) throws ServiceException {
1679    try {
1680      checkOpen();
1681      requestCount.increment();
1682      HRegion region = getRegion(request.getRegion());
1683      LOG.info("Flushing " + region.getRegionInfo().getRegionNameAsString());
1684      boolean shouldFlush = true;
1685      if (request.hasIfOlderThanTs()) {
1686        shouldFlush = region.getEarliestFlushTimeForAllStores() < request.getIfOlderThanTs();
1687      }
1688      FlushRegionResponse.Builder builder = FlushRegionResponse.newBuilder();
1689      if (shouldFlush) {
1690        boolean writeFlushWalMarker =  request.hasWriteFlushWalMarker() ?
1691            request.getWriteFlushWalMarker() : false;
1692        // Go behind the curtain so we can manage writing of the flush WAL marker
1693        HRegion.FlushResultImpl flushResult =
1694            region.flushcache(true, writeFlushWalMarker, FlushLifeCycleTracker.DUMMY);
1695        boolean compactionNeeded = flushResult.isCompactionNeeded();
1696        if (compactionNeeded) {
1697          regionServer.compactSplitThread.requestSystemCompaction(region,
1698            "Compaction through user triggered flush");
1699        }
1700        builder.setFlushed(flushResult.isFlushSucceeded());
1701        builder.setWroteFlushWalMarker(flushResult.wroteFlushWalMarker);
1702      }
1703      builder.setLastFlushTime(region.getEarliestFlushTimeForAllStores());
1704      return builder.build();
1705    } catch (DroppedSnapshotException ex) {
1706      // Cache flush can fail in a few places. If it fails in a critical
1707      // section, we get a DroppedSnapshotException and a replay of wal
1708      // is required. Currently the only way to do this is a restart of
1709      // the server.
1710      regionServer.abort("Replay of WAL required. Forcing server shutdown", ex);
1711      throw new ServiceException(ex);
1712    } catch (IOException ie) {
1713      throw new ServiceException(ie);
1714    }
1715  }
1716
1717  @Override
1718  @QosPriority(priority=HConstants.ADMIN_QOS)
1719  public GetOnlineRegionResponse getOnlineRegion(final RpcController controller,
1720      final GetOnlineRegionRequest request) throws ServiceException {
1721    try {
1722      checkOpen();
1723      requestCount.increment();
1724      Map<String, HRegion> onlineRegions = regionServer.onlineRegions;
1725      List<RegionInfo> list = new ArrayList<>(onlineRegions.size());
1726      for (HRegion region: onlineRegions.values()) {
1727        list.add(region.getRegionInfo());
1728      }
1729      Collections.sort(list, RegionInfo.COMPARATOR);
1730      return ResponseConverter.buildGetOnlineRegionResponse(list);
1731    } catch (IOException ie) {
1732      throw new ServiceException(ie);
1733    }
1734  }
1735
  /**
   * Return the RegionInfo of the requested region, optionally with its compaction
   * state, splittable/mergeable flags, and a computed best split row.
   */
  @Override
  @QosPriority(priority=HConstants.ADMIN_QOS)
  public GetRegionInfoResponse getRegionInfo(final RpcController controller,
      final GetRegionInfoRequest request) throws ServiceException {
    try {
      checkOpen();
      requestCount.increment();
      HRegion region = getRegion(request.getRegion());
      RegionInfo info = region.getRegionInfo();
      byte[] bestSplitRow = null;
      boolean shouldSplit = true;
      if (request.hasBestSplitRow() && request.getBestSplitRow()) {
        HRegion r = region;
        region.startRegionOperation(Operation.SPLIT_REGION);
        // Force-split so checkSplit() computes a split point even when the region
        // would not normally split.
        r.forceSplit(null);
        // Even after setting force split if split policy says no to split then we should not split.
        shouldSplit = region.getSplitPolicy().shouldSplit() && !info.isMetaRegion();
        bestSplitRow = r.checkSplit();
        // when all table data are in memstore, bestSplitRow = null
        // try to flush region first
        if(bestSplitRow == null) {
          r.flush(true);
          bestSplitRow = r.checkSplit();
        }
        // Undo the force-split flag set above.
        r.clearSplit();
      }
      GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder();
      builder.setRegionInfo(ProtobufUtil.toRegionInfo(info));
      if (request.hasCompactionState() && request.getCompactionState()) {
        builder.setCompactionState(ProtobufUtil.createCompactionState(region.getCompactionState()));
      }
      builder.setSplittable(region.isSplittable() && shouldSplit);
      builder.setMergeable(region.isMergeable());
      if (request.hasBestSplitRow() && request.getBestSplitRow() && bestSplitRow != null) {
        builder.setBestSplitRow(UnsafeByteOperations.unsafeWrap(bestSplitRow));
      }
      return builder.build();
    } catch (IOException ie) {
      throw new ServiceException(ie);
    }
  }
1777
1778  @Override
1779  @QosPriority(priority=HConstants.ADMIN_QOS)
1780  public GetRegionLoadResponse getRegionLoad(RpcController controller,
1781      GetRegionLoadRequest request) throws ServiceException {
1782
1783    List<HRegion> regions;
1784    if (request.hasTableName()) {
1785      TableName tableName = ProtobufUtil.toTableName(request.getTableName());
1786      regions = regionServer.getRegions(tableName);
1787    } else {
1788      regions = regionServer.getRegions();
1789    }
1790    List<RegionLoad> rLoads = new ArrayList<>(regions.size());
1791    RegionLoad.Builder regionLoadBuilder = ClusterStatusProtos.RegionLoad.newBuilder();
1792    RegionSpecifier.Builder regionSpecifier = RegionSpecifier.newBuilder();
1793
1794    try {
1795      for (HRegion region : regions) {
1796        rLoads.add(regionServer.createRegionLoad(region, regionLoadBuilder, regionSpecifier));
1797      }
1798    } catch (IOException e) {
1799      throw new ServiceException(e);
1800    }
1801    GetRegionLoadResponse.Builder builder = GetRegionLoadResponse.newBuilder();
1802    builder.addAllRegionLoads(rLoads);
1803    return builder.build();
1804  }
1805
1806  @Override
1807  @QosPriority(priority=HConstants.ADMIN_QOS)
1808  public ClearCompactionQueuesResponse clearCompactionQueues(RpcController controller,
1809    ClearCompactionQueuesRequest request) throws ServiceException {
1810    LOG.debug("Client=" + RpcServer.getRequestUserName().orElse(null) + "/"
1811        + RpcServer.getRemoteAddress().orElse(null) + " clear compactions queue");
1812    ClearCompactionQueuesResponse.Builder respBuilder = ClearCompactionQueuesResponse.newBuilder();
1813    requestCount.increment();
1814    if (clearCompactionQueues.compareAndSet(false,true)) {
1815      try {
1816        checkOpen();
1817        regionServer.getRegionServerCoprocessorHost().preClearCompactionQueues();
1818        for (String queueName : request.getQueueNameList()) {
1819          LOG.debug("clear " + queueName + " compaction queue");
1820          switch (queueName) {
1821            case "long":
1822              regionServer.compactSplitThread.clearLongCompactionsQueue();
1823              break;
1824            case "short":
1825              regionServer.compactSplitThread.clearShortCompactionsQueue();
1826              break;
1827            default:
1828              LOG.warn("Unknown queue name " + queueName);
1829              throw new IOException("Unknown queue name " + queueName);
1830          }
1831        }
1832        regionServer.getRegionServerCoprocessorHost().postClearCompactionQueues();
1833      } catch (IOException ie) {
1834        throw new ServiceException(ie);
1835      } finally {
1836        clearCompactionQueues.set(false);
1837      }
1838    } else {
1839      LOG.warn("Clear compactions queue is executing by other admin.");
1840    }
1841    return respBuilder.build();
1842  }
1843
1844  /**
1845   * Get some information of the region server.
1846   *
1847   * @param controller the RPC controller
1848   * @param request the request
1849   * @throws ServiceException
1850   */
1851  @Override
1852  @QosPriority(priority=HConstants.ADMIN_QOS)
1853  public GetServerInfoResponse getServerInfo(final RpcController controller,
1854      final GetServerInfoRequest request) throws ServiceException {
1855    try {
1856      checkOpen();
1857    } catch (IOException ie) {
1858      throw new ServiceException(ie);
1859    }
1860    requestCount.increment();
1861    int infoPort = regionServer.infoServer != null ? regionServer.infoServer.getPort() : -1;
1862    return ResponseConverter.buildGetServerInfoResponse(regionServer.serverName, infoPort);
1863  }
1864
1865  @Override
1866  @QosPriority(priority=HConstants.ADMIN_QOS)
1867  public GetStoreFileResponse getStoreFile(final RpcController controller,
1868      final GetStoreFileRequest request) throws ServiceException {
1869    try {
1870      checkOpen();
1871      HRegion region = getRegion(request.getRegion());
1872      requestCount.increment();
1873      Set<byte[]> columnFamilies;
1874      if (request.getFamilyCount() == 0) {
1875        columnFamilies = region.getTableDescriptor().getColumnFamilyNames();
1876      } else {
1877        columnFamilies = new TreeSet<>(Bytes.BYTES_RAWCOMPARATOR);
1878        for (ByteString cf: request.getFamilyList()) {
1879          columnFamilies.add(cf.toByteArray());
1880        }
1881      }
1882      int nCF = columnFamilies.size();
1883      List<String>  fileList = region.getStoreFileList(
1884        columnFamilies.toArray(new byte[nCF][]));
1885      GetStoreFileResponse.Builder builder = GetStoreFileResponse.newBuilder();
1886      builder.addAllStoreFile(fileList);
1887      return builder.build();
1888    } catch (IOException ie) {
1889      throw new ServiceException(ie);
1890    }
1891  }
1892
1893  /**
1894   * Open asynchronously a region or a set of regions on the region server.
1895   *
1896   * The opening is coordinated by ZooKeeper, and this method requires the znode to be created
1897   *  before being called. As a consequence, this method should be called only from the master.
1898   * <p>
1899   * Different manages states for the region are:
1900   * </p><ul>
1901   *  <li>region not opened: the region opening will start asynchronously.</li>
1902   *  <li>a close is already in progress: this is considered as an error.</li>
1903   *  <li>an open is already in progress: this new open request will be ignored. This is important
1904   *  because the Master can do multiple requests if it crashes.</li>
1905   *  <li>the region is already opened:  this new open request will be ignored.</li>
1906   *  </ul>
1907   * <p>
1908   * Bulk assign: If there are more than 1 region to open, it will be considered as a bulk assign.
1909   * For a single region opening, errors are sent through a ServiceException. For bulk assign,
1910   * errors are put in the response as FAILED_OPENING.
1911   * </p>
1912   * @param controller the RPC controller
1913   * @param request the request
1914   * @throws ServiceException
1915   */
1916  @Override
1917  @QosPriority(priority=HConstants.ADMIN_QOS)
1918  public OpenRegionResponse openRegion(final RpcController controller,
1919      final OpenRegionRequest request) throws ServiceException {
1920    requestCount.increment();
1921    if (request.hasServerStartCode()) {
1922      // check that we are the same server that this RPC is intended for.
1923      long serverStartCode = request.getServerStartCode();
1924      if (regionServer.serverName.getStartcode() !=  serverStartCode) {
1925        throw new ServiceException(new DoNotRetryIOException("This RPC was intended for a " +
1926            "different server with startCode: " + serverStartCode + ", this server is: "
1927            + regionServer.serverName));
1928      }
1929    }
1930
1931    OpenRegionResponse.Builder builder = OpenRegionResponse.newBuilder();
1932    final int regionCount = request.getOpenInfoCount();
1933    final Map<TableName, TableDescriptor> htds = new HashMap<>(regionCount);
1934    final boolean isBulkAssign = regionCount > 1;
1935    try {
1936      checkOpen();
1937    } catch (IOException ie) {
1938      TableName tableName = null;
1939      if (regionCount == 1) {
1940        org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo ri = request.getOpenInfo(0).getRegion();
1941        if (ri != null) {
1942          tableName = ProtobufUtil.toTableName(ri.getTableName());
1943        }
1944      }
1945      if (!TableName.META_TABLE_NAME.equals(tableName)) {
1946        throw new ServiceException(ie);
1947      }
1948      // We are assigning meta, wait a little for regionserver to finish initialization.
1949      int timeout = regionServer.conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
1950        HConstants.DEFAULT_HBASE_RPC_TIMEOUT) >> 2; // Quarter of RPC timeout
1951      long endTime = System.currentTimeMillis() + timeout;
1952      synchronized (regionServer.online) {
1953        try {
1954          while (System.currentTimeMillis() <= endTime
1955              && !regionServer.isStopped() && !regionServer.isOnline()) {
1956            regionServer.online.wait(regionServer.msgInterval);
1957          }
1958          checkOpen();
1959        } catch (InterruptedException t) {
1960          Thread.currentThread().interrupt();
1961          throw new ServiceException(t);
1962        } catch (IOException e) {
1963          throw new ServiceException(e);
1964        }
1965      }
1966    }
1967
1968    long masterSystemTime = request.hasMasterSystemTime() ? request.getMasterSystemTime() : -1;
1969
1970    for (RegionOpenInfo regionOpenInfo : request.getOpenInfoList()) {
1971      final RegionInfo region = ProtobufUtil.toRegionInfo(regionOpenInfo.getRegion());
1972      TableDescriptor htd;
1973      try {
1974        String encodedName = region.getEncodedName();
1975        byte[] encodedNameBytes = region.getEncodedNameAsBytes();
1976        final HRegion onlineRegion = regionServer.getRegion(encodedName);
1977        if (onlineRegion != null) {
1978          // The region is already online. This should not happen any more.
1979          String error = "Received OPEN for the region:"
1980            + region.getRegionNameAsString() + ", which is already online";
1981          LOG.warn(error);
1982          //regionServer.abort(error);
1983          //throw new IOException(error);
1984          builder.addOpeningState(RegionOpeningState.OPENED);
1985          continue;
1986        }
1987        LOG.info("Open " + region.getRegionNameAsString());
1988
1989        final Boolean previous = regionServer.regionsInTransitionInRS.putIfAbsent(
1990          encodedNameBytes, Boolean.TRUE);
1991
1992        if (Boolean.FALSE.equals(previous)) {
1993          if (regionServer.getRegion(encodedName) != null) {
1994            // There is a close in progress. This should not happen any more.
1995            String error = "Received OPEN for the region:"
1996              + region.getRegionNameAsString() + ", which we are already trying to CLOSE";
1997            regionServer.abort(error);
1998            throw new IOException(error);
1999          }
2000          regionServer.regionsInTransitionInRS.put(encodedNameBytes, Boolean.TRUE);
2001        }
2002
2003        if (Boolean.TRUE.equals(previous)) {
2004          // An open is in progress. This is supported, but let's log this.
2005          LOG.info("Receiving OPEN for the region:" +
2006            region.getRegionNameAsString() + ", which we are already trying to OPEN"
2007              + " - ignoring this new request for this region.");
2008        }
2009
2010        // We are opening this region. If it moves back and forth for whatever reason, we don't
2011        // want to keep returning the stale moved record while we are opening/if we close again.
2012        regionServer.removeFromMovedRegions(region.getEncodedName());
2013
2014        if (previous == null || !previous.booleanValue()) {
2015          htd = htds.get(region.getTable());
2016          if (htd == null) {
2017            htd = regionServer.tableDescriptors.get(region.getTable());
2018            htds.put(region.getTable(), htd);
2019          }
2020          if (htd == null) {
2021            throw new IOException("Missing table descriptor for " + region.getEncodedName());
2022          }
2023          // If there is no action in progress, we can submit a specific handler.
2024          // Need to pass the expected version in the constructor.
2025          if (regionServer.executorService == null) {
2026            LOG.info("No executor executorService; skipping open request");
2027          } else {
2028            if (region.isMetaRegion()) {
2029              regionServer.executorService.submit(new OpenMetaHandler(
2030              regionServer, regionServer, region, htd, masterSystemTime));
2031            } else {
2032              if (regionOpenInfo.getFavoredNodesCount() > 0) {
2033                regionServer.updateRegionFavoredNodesMapping(region.getEncodedName(),
2034                regionOpenInfo.getFavoredNodesList());
2035              }
2036              if (htd.getPriority() >= HConstants.ADMIN_QOS || region.getTable().isSystemTable()) {
2037                regionServer.executorService.submit(new OpenPriorityRegionHandler(
2038                regionServer, regionServer, region, htd, masterSystemTime));
2039              } else {
2040                regionServer.executorService.submit(new OpenRegionHandler(
2041                regionServer, regionServer, region, htd, masterSystemTime));
2042              }
2043            }
2044          }
2045        }
2046
2047        builder.addOpeningState(RegionOpeningState.OPENED);
2048      } catch (IOException ie) {
2049        LOG.warn("Failed opening region " + region.getRegionNameAsString(), ie);
2050        if (isBulkAssign) {
2051          builder.addOpeningState(RegionOpeningState.FAILED_OPENING);
2052        } else {
2053          throw new ServiceException(ie);
2054        }
2055      }
2056    }
2057    return builder.build();
2058  }
2059
2060  /**
2061   *  Wamrmup a region on this server.
2062   *
2063   * This method should only be called by Master. It synchrnously opens the region and
2064   * closes the region bringing the most important pages in cache.
2065   * <p>
2066   *
2067   * @param controller the RPC controller
2068   * @param request the request
2069   * @throws ServiceException
2070   */
2071  @Override
2072  public WarmupRegionResponse warmupRegion(final RpcController controller,
2073      final WarmupRegionRequest request) throws ServiceException {
2074
2075    final RegionInfo region = ProtobufUtil.toRegionInfo(request.getRegionInfo());
2076    TableDescriptor htd;
2077    WarmupRegionResponse response = WarmupRegionResponse.getDefaultInstance();
2078
2079    try {
2080      checkOpen();
2081      String encodedName = region.getEncodedName();
2082      byte[] encodedNameBytes = region.getEncodedNameAsBytes();
2083      final HRegion onlineRegion = regionServer.getRegion(encodedName);
2084
2085      if (onlineRegion != null) {
2086        LOG.info("Region already online. Skipping warming up " + region);
2087        return response;
2088      }
2089
2090      if (LOG.isDebugEnabled()) {
2091        LOG.debug("Warming up Region " + region.getRegionNameAsString());
2092      }
2093
2094      htd = regionServer.tableDescriptors.get(region.getTable());
2095
2096      if (regionServer.getRegionsInTransitionInRS().containsKey(encodedNameBytes)) {
2097        LOG.info("Region is in transition. Skipping warmup " + region);
2098        return response;
2099      }
2100
2101      HRegion.warmupHRegion(region, htd, regionServer.getWAL(region),
2102          regionServer.getConfiguration(), regionServer, null);
2103
2104    } catch (IOException ie) {
2105      LOG.error("Failed warming up region " + region.getRegionNameAsString(), ie);
2106      throw new ServiceException(ie);
2107    }
2108
2109    return response;
2110  }
2111
2112  /**
2113   * Replay the given changes when distributedLogReplay WAL edits from a failed RS. The guarantee is
2114   * that the given mutations will be durable on the receiving RS if this method returns without any
2115   * exception.
2116   * @param controller the RPC controller
2117   * @param request the request
2118   * @throws ServiceException
2119   */
2120  @Override
2121  @QosPriority(priority = HConstants.REPLAY_QOS)
2122  public ReplicateWALEntryResponse replay(final RpcController controller,
2123      final ReplicateWALEntryRequest request) throws ServiceException {
2124    long before = EnvironmentEdgeManager.currentTime();
2125    CellScanner cells = ((HBaseRpcController) controller).cellScanner();
2126    try {
2127      checkOpen();
2128      List<WALEntry> entries = request.getEntryList();
2129      if (entries == null || entries.isEmpty()) {
2130        // empty input
2131        return ReplicateWALEntryResponse.newBuilder().build();
2132      }
2133      ByteString regionName = entries.get(0).getKey().getEncodedRegionName();
2134      HRegion region = regionServer.getRegionByEncodedName(regionName.toStringUtf8());
2135      RegionCoprocessorHost coprocessorHost =
2136          ServerRegionReplicaUtil.isDefaultReplica(region.getRegionInfo())
2137            ? region.getCoprocessorHost()
2138            : null; // do not invoke coprocessors if this is a secondary region replica
2139      List<Pair<WALKey, WALEdit>> walEntries = new ArrayList<>();
2140
2141      // Skip adding the edits to WAL if this is a secondary region replica
2142      boolean isPrimary = RegionReplicaUtil.isDefaultReplica(region.getRegionInfo());
2143      Durability durability = isPrimary ? Durability.USE_DEFAULT : Durability.SKIP_WAL;
2144
2145      for (WALEntry entry : entries) {
2146        if (!regionName.equals(entry.getKey().getEncodedRegionName())) {
2147          throw new NotServingRegionException("Replay request contains entries from multiple " +
2148              "regions. First region:" + regionName.toStringUtf8() + " , other region:"
2149              + entry.getKey().getEncodedRegionName());
2150        }
2151        if (regionServer.nonceManager != null && isPrimary) {
2152          long nonceGroup = entry.getKey().hasNonceGroup()
2153            ? entry.getKey().getNonceGroup() : HConstants.NO_NONCE;
2154          long nonce = entry.getKey().hasNonce() ? entry.getKey().getNonce() : HConstants.NO_NONCE;
2155          regionServer.nonceManager.reportOperationFromWal(
2156              nonceGroup,
2157              nonce,
2158              entry.getKey().getWriteTime());
2159        }
2160        Pair<WALKey, WALEdit> walEntry = (coprocessorHost == null) ? null : new Pair<>();
2161        List<WALSplitter.MutationReplay> edits = WALSplitter.getMutationsFromWALEntry(entry,
2162          cells, walEntry, durability);
2163        if (coprocessorHost != null) {
2164          // Start coprocessor replay here. The coprocessor is for each WALEdit instead of a
2165          // KeyValue.
2166          if (coprocessorHost.preWALRestore(region.getRegionInfo(), walEntry.getFirst(),
2167            walEntry.getSecond())) {
2168            // if bypass this log entry, ignore it ...
2169            continue;
2170          }
2171          walEntries.add(walEntry);
2172        }
2173        if(edits!=null && !edits.isEmpty()) {
2174          // HBASE-17924
2175          // sort to improve lock efficiency
2176          Collections.sort(edits, (v1, v2) -> Row.COMPARATOR.compare(v1.mutation, v2.mutation));
2177          long replaySeqId = (entry.getKey().hasOrigSequenceNumber()) ?
2178            entry.getKey().getOrigSequenceNumber() : entry.getKey().getLogSequenceNumber();
2179          OperationStatus[] result = doReplayBatchOp(region, edits, replaySeqId);
2180          // check if it's a partial success
2181          for (int i = 0; result != null && i < result.length; i++) {
2182            if (result[i] != OperationStatus.SUCCESS) {
2183              throw new IOException(result[i].getExceptionMsg());
2184            }
2185          }
2186        }
2187      }
2188
2189      //sync wal at the end because ASYNC_WAL is used above
2190      WAL wal = region.getWAL();
2191      if (wal != null) {
2192        wal.sync();
2193      }
2194
2195      if (coprocessorHost != null) {
2196        for (Pair<WALKey, WALEdit> entry : walEntries) {
2197          coprocessorHost.postWALRestore(region.getRegionInfo(), entry.getFirst(),
2198            entry.getSecond());
2199        }
2200      }
2201      return ReplicateWALEntryResponse.newBuilder().build();
2202    } catch (IOException ie) {
2203      throw new ServiceException(ie);
2204    } finally {
2205      if (regionServer.metricsRegionServer != null) {
2206        regionServer.metricsRegionServer.updateReplay(
2207          EnvironmentEdgeManager.currentTime() - before);
2208      }
2209    }
2210  }
2211
2212  /**
2213   * Replicate WAL entries on the region server.
2214   *
2215   * @param controller the RPC controller
2216   * @param request the request
2217   * @throws ServiceException
2218   */
2219  @Override
2220  @QosPriority(priority=HConstants.REPLICATION_QOS)
2221  public ReplicateWALEntryResponse replicateWALEntry(final RpcController controller,
2222      final ReplicateWALEntryRequest request) throws ServiceException {
2223    try {
2224      checkOpen();
2225      if (regionServer.replicationSinkHandler != null) {
2226        requestCount.increment();
2227        List<WALEntry> entries = request.getEntryList();
2228        CellScanner cellScanner = ((HBaseRpcController)controller).cellScanner();
2229        regionServer.getRegionServerCoprocessorHost().preReplicateLogEntries();
2230        regionServer.replicationSinkHandler.replicateLogEntries(entries, cellScanner,
2231          request.getReplicationClusterId(), request.getSourceBaseNamespaceDirPath(),
2232          request.getSourceHFileArchiveDirPath());
2233        regionServer.getRegionServerCoprocessorHost().postReplicateLogEntries();
2234        return ReplicateWALEntryResponse.newBuilder().build();
2235      } else {
2236        throw new ServiceException("Replication services are not initialized yet");
2237      }
2238    } catch (IOException ie) {
2239      throw new ServiceException(ie);
2240    }
2241  }
2242
2243  /**
2244   * Roll the WAL writer of the region server.
2245   * @param controller the RPC controller
2246   * @param request the request
2247   * @throws ServiceException
2248   */
2249  @Override
2250  public RollWALWriterResponse rollWALWriter(final RpcController controller,
2251      final RollWALWriterRequest request) throws ServiceException {
2252    try {
2253      checkOpen();
2254      requestCount.increment();
2255      regionServer.getRegionServerCoprocessorHost().preRollWALWriterRequest();
2256      regionServer.walRoller.requestRollAll();
2257      regionServer.getRegionServerCoprocessorHost().postRollWALWriterRequest();
2258      RollWALWriterResponse.Builder builder = RollWALWriterResponse.newBuilder();
2259      return builder.build();
2260    } catch (IOException ie) {
2261      throw new ServiceException(ie);
2262    }
2263  }
2264
2265
2266  /**
2267   * Stop the region server.
2268   *
2269   * @param controller the RPC controller
2270   * @param request the request
2271   * @throws ServiceException
2272   */
2273  @Override
2274  @QosPriority(priority=HConstants.ADMIN_QOS)
2275  public StopServerResponse stopServer(final RpcController controller,
2276      final StopServerRequest request) throws ServiceException {
2277    requestCount.increment();
2278    String reason = request.getReason();
2279    regionServer.stop(reason);
2280    return StopServerResponse.newBuilder().build();
2281  }
2282
2283  @Override
2284  public UpdateFavoredNodesResponse updateFavoredNodes(RpcController controller,
2285      UpdateFavoredNodesRequest request) throws ServiceException {
2286    List<UpdateFavoredNodesRequest.RegionUpdateInfo> openInfoList = request.getUpdateInfoList();
2287    UpdateFavoredNodesResponse.Builder respBuilder = UpdateFavoredNodesResponse.newBuilder();
2288    for (UpdateFavoredNodesRequest.RegionUpdateInfo regionUpdateInfo : openInfoList) {
2289      RegionInfo hri = ProtobufUtil.toRegionInfo(regionUpdateInfo.getRegion());
2290      if (regionUpdateInfo.getFavoredNodesCount() > 0) {
2291        regionServer.updateRegionFavoredNodesMapping(hri.getEncodedName(),
2292          regionUpdateInfo.getFavoredNodesList());
2293      }
2294    }
2295    respBuilder.setResponse(openInfoList.size());
2296    return respBuilder.build();
2297  }
2298
2299  /**
2300   * Atomically bulk load several HFiles into an open region
2301   * @return true if successful, false is failed but recoverably (no action)
2302   * @throws ServiceException if failed unrecoverably
2303   */
2304  @Override
2305  public BulkLoadHFileResponse bulkLoadHFile(final RpcController controller,
2306      final BulkLoadHFileRequest request) throws ServiceException {
2307    long start = EnvironmentEdgeManager.currentTime();
2308    try {
2309      checkOpen();
2310      requestCount.increment();
2311      HRegion region = getRegion(request.getRegion());
2312      Map<byte[], List<Path>> map = null;
2313
2314      // Check to see if this bulk load would exceed the space quota for this table
2315      if (QuotaUtil.isQuotaEnabled(getConfiguration())) {
2316        ActivePolicyEnforcement activeSpaceQuotas = getSpaceQuotaManager().getActiveEnforcements();
2317        SpaceViolationPolicyEnforcement enforcement = activeSpaceQuotas.getPolicyEnforcement(
2318            region);
2319        if (enforcement != null) {
2320          // Bulk loads must still be atomic. We must enact all or none.
2321          List<String> filePaths = new ArrayList<>(request.getFamilyPathCount());
2322          for (FamilyPath familyPath : request.getFamilyPathList()) {
2323            filePaths.add(familyPath.getPath());
2324          }
2325          // Check if the batch of files exceeds the current quota
2326          enforcement.checkBulkLoad(regionServer.getFileSystem(), filePaths);
2327        }
2328      }
2329
2330      List<Pair<byte[], String>> familyPaths = new ArrayList<>(request.getFamilyPathCount());
2331      for (FamilyPath familyPath : request.getFamilyPathList()) {
2332        familyPaths.add(new Pair<>(familyPath.getFamily().toByteArray(), familyPath.getPath()));
2333      }
2334      if (!request.hasBulkToken()) {
2335        if (region.getCoprocessorHost() != null) {
2336          region.getCoprocessorHost().preBulkLoadHFile(familyPaths);
2337        }
2338        try {
2339          map = region.bulkLoadHFiles(familyPaths, request.getAssignSeqNum(), null,
2340              request.getCopyFile());
2341        } finally {
2342          if (region.getCoprocessorHost() != null) {
2343            region.getCoprocessorHost().postBulkLoadHFile(familyPaths, map);
2344          }
2345        }
2346      } else {
2347        // secure bulk load
2348        map = regionServer.secureBulkLoadManager.secureBulkLoadHFiles(region, request);
2349      }
2350      BulkLoadHFileResponse.Builder builder = BulkLoadHFileResponse.newBuilder();
2351      builder.setLoaded(map != null);
2352      return builder.build();
2353    } catch (IOException ie) {
2354      throw new ServiceException(ie);
2355    } finally {
2356      if (regionServer.metricsRegionServer != null) {
2357        regionServer.metricsRegionServer.updateBulkLoad(
2358            EnvironmentEdgeManager.currentTime() - start);
2359      }
2360    }
2361  }
2362
2363  @Override
2364  public PrepareBulkLoadResponse prepareBulkLoad(RpcController controller,
2365      PrepareBulkLoadRequest request) throws ServiceException {
2366    try {
2367      checkOpen();
2368      requestCount.increment();
2369
2370      HRegion region = getRegion(request.getRegion());
2371
2372      String bulkToken = regionServer.secureBulkLoadManager.prepareBulkLoad(region, request);
2373      PrepareBulkLoadResponse.Builder builder = PrepareBulkLoadResponse.newBuilder();
2374      builder.setBulkToken(bulkToken);
2375      return builder.build();
2376    } catch (IOException ie) {
2377      throw new ServiceException(ie);
2378    }
2379  }
2380
2381  @Override
2382  public CleanupBulkLoadResponse cleanupBulkLoad(RpcController controller,
2383      CleanupBulkLoadRequest request) throws ServiceException {
2384    try {
2385      checkOpen();
2386      requestCount.increment();
2387
2388      HRegion region = getRegion(request.getRegion());
2389
2390      regionServer.secureBulkLoadManager.cleanupBulkLoad(region, request);
2391      CleanupBulkLoadResponse response = CleanupBulkLoadResponse.newBuilder().build();
2392      return response;
2393    } catch (IOException ie) {
2394      throw new ServiceException(ie);
2395    }
2396  }
2397
2398  @Override
2399  public CoprocessorServiceResponse execService(final RpcController controller,
2400      final CoprocessorServiceRequest request) throws ServiceException {
2401    try {
2402      checkOpen();
2403      requestCount.increment();
2404      HRegion region = getRegion(request.getRegion());
2405      com.google.protobuf.Message result = execServiceOnRegion(region, request.getCall());
2406      CoprocessorServiceResponse.Builder builder = CoprocessorServiceResponse.newBuilder();
2407      builder.setRegion(RequestConverter.buildRegionSpecifier(
2408        RegionSpecifierType.REGION_NAME, region.getRegionInfo().getRegionName()));
2409      // TODO: COPIES!!!!!!
2410      builder.setValue(builder.getValueBuilder().setName(result.getClass().getName()).
2411        setValue(org.apache.hbase.thirdparty.com.google.protobuf.ByteString.
2412            copyFrom(result.toByteArray())));
2413      return builder.build();
2414    } catch (IOException ie) {
2415      throw new ServiceException(ie);
2416    }
2417  }
2418
2419  private com.google.protobuf.Message execServiceOnRegion(HRegion region,
2420      final ClientProtos.CoprocessorServiceCall serviceCall) throws IOException {
2421    // ignore the passed in controller (from the serialized call)
2422    ServerRpcController execController = new ServerRpcController();
2423    return region.execService(execController, serviceCall);
2424  }
2425
2426  /**
2427   * Get data from a table.
2428   *
2429   * @param controller the RPC controller
2430   * @param request the get request
2431   * @throws ServiceException
2432   */
2433  @Override
2434  public GetResponse get(final RpcController controller,
2435      final GetRequest request) throws ServiceException {
2436    long before = EnvironmentEdgeManager.currentTime();
2437    OperationQuota quota = null;
2438    HRegion region = null;
2439    try {
2440      checkOpen();
2441      requestCount.increment();
2442      rpcGetRequestCount.increment();
2443      region = getRegion(request.getRegion());
2444
2445      GetResponse.Builder builder = GetResponse.newBuilder();
2446      ClientProtos.Get get = request.getGet();
2447      // An asynchbase client, https://github.com/OpenTSDB/asynchbase, starts by trying to do
2448      // a get closest before. Throwing the UnknownProtocolException signals it that it needs
2449      // to switch and do hbase2 protocol (HBase servers do not tell clients what versions
2450      // they are; its a problem for non-native clients like asynchbase. HBASE-20225.
2451      if (get.hasClosestRowBefore() && get.getClosestRowBefore()) {
2452        throw new UnknownProtocolException("Is this a pre-hbase-1.0.0 or asynchbase client? " +
2453            "Client is invoking getClosestRowBefore removed in hbase-2.0.0 replaced by " +
2454            "reverse Scan.");
2455      }
2456      Boolean existence = null;
2457      Result r = null;
2458      RpcCallContext context = RpcServer.getCurrentCall().orElse(null);
2459      quota = getRpcQuotaManager().checkQuota(region, OperationQuota.OperationType.GET);
2460
2461      Get clientGet = ProtobufUtil.toGet(get);
2462      if (get.getExistenceOnly() && region.getCoprocessorHost() != null) {
2463        existence = region.getCoprocessorHost().preExists(clientGet);
2464      }
2465      if (existence == null) {
2466        if (context != null) {
2467          r = get(clientGet, (region), null, context);
2468        } else {
2469          // for test purpose
2470          r = region.get(clientGet);
2471        }
2472        if (get.getExistenceOnly()) {
2473          boolean exists = r.getExists();
2474          if (region.getCoprocessorHost() != null) {
2475            exists = region.getCoprocessorHost().postExists(clientGet, exists);
2476          }
2477          existence = exists;
2478        }
2479      }
2480      if (existence != null) {
2481        ClientProtos.Result pbr =
2482            ProtobufUtil.toResult(existence, region.getRegionInfo().getReplicaId() != 0);
2483        builder.setResult(pbr);
2484      } else if (r != null) {
2485        ClientProtos.Result pbr;
2486        if (isClientCellBlockSupport(context) && controller instanceof HBaseRpcController
2487            && VersionInfoUtil.hasMinimumVersion(context.getClientVersionInfo(), 1, 3)) {
2488          pbr = ProtobufUtil.toResultNoData(r);
2489          ((HBaseRpcController) controller).setCellScanner(CellUtil.createCellScanner(r
2490              .rawCells()));
2491          addSize(context, r, null);
2492        } else {
2493          pbr = ProtobufUtil.toResult(r);
2494        }
2495        builder.setResult(pbr);
2496      }
2497      //r.cells is null when an table.exists(get) call
2498      if (r != null && r.rawCells() != null) {
2499        quota.addGetResult(r);
2500      }
2501      return builder.build();
2502    } catch (IOException ie) {
2503      throw new ServiceException(ie);
2504    } finally {
2505      MetricsRegionServer mrs = regionServer.metricsRegionServer;
2506      if (mrs != null) {
2507        TableDescriptor td = region != null? region.getTableDescriptor(): null;
2508        if (td != null) {
2509          mrs.updateGet(td.getTableName(), EnvironmentEdgeManager.currentTime() - before);
2510        }
2511      }
2512      if (quota != null) {
2513        quota.close();
2514      }
2515    }
2516  }
2517
  /**
   * Server-side single-Get execution shared by {@code get()} and {@code multi()}.
   * <p>
   * Mirrors HRegion#get, but instead of closing the scanner here its close is deferred to an
   * RPC callback: directly on {@code context} for a plain get, or collected into
   * {@code closeCallBack} when results are aggregated for multi().
   * <p>
   * NOTE(review): when {@code closeCallBack} is null this method dereferences {@code context};
   * the visible callers only pass a null callback together with a non-null context — confirm
   * before adding new call sites.
   *
   * @param get the client Get to execute
   * @param region the region to read from
   * @param closeCallBack aggregating scanner-close callback from multi(), or null for a plain get
   * @param context the current RPC call context; owns scanner cleanup when closeCallBack is null
   * @return the Result; the exists flag is populated for existence-only checks
   * @throws IOException on read failure
   */
  private Result get(Get get, HRegion region, RegionScannersCloseCallBack closeCallBack,
      RpcCallContext context) throws IOException {
    region.prepareGet(get);
    // Reads from non-default replicas may be stale; flag them so the client knows.
    boolean stale = region.getRegionInfo().getReplicaId() != 0;

    // This method is almost the same as HRegion#get.
    List<Cell> results = new ArrayList<>();
    long before = EnvironmentEdgeManager.currentTime();
    // pre-get CP hook
    if (region.getCoprocessorHost() != null) {
      if (region.getCoprocessorHost().preGet(get, results)) {
        // Coprocessor bypassed the core read; return whatever it populated.
        region.metricsUpdateForGet(results, before);
        return Result
            .create(results, get.isCheckExistenceOnly() ? !results.isEmpty() : null, stale);
      }
    }
    Scan scan = new Scan(get);
    if (scan.getLoadColumnFamiliesOnDemandValue() == null) {
      scan.setLoadColumnFamiliesOnDemand(region.isLoadingCfsOnDemandDefault());
    }
    RegionScannerImpl scanner = null;
    try {
      scanner = region.getScanner(scan);
      scanner.next(results);
    } finally {
      if (scanner != null) {
        if (closeCallBack == null) {
          // If there is a context then the scanner can be added to the current
          // RpcCallContext. The rpc callback will take care of closing the
          // scanner, for eg in case
          // of get()
          context.setCallBack(scanner);
        } else {
          // The call is from multi() where the results from the get() are
          // aggregated and then send out to the
          // rpc. The rpccall back will close all such scanners created as part
          // of multi().
          closeCallBack.addScanner(scanner);
        }
      }
    }

    // post-get CP hook
    if (region.getCoprocessorHost() != null) {
      region.getCoprocessorHost().postGet(get, results);
    }
    region.metricsUpdateForGet(results, before);

    return Result.create(results, get.isCheckExistenceOnly() ? !results.isEmpty() : null, stale);
  }
2568
2569  private void checkBatchSizeAndLogLargeSize(MultiRequest request) {
2570    int sum = 0;
2571    String firstRegionName = null;
2572    for (RegionAction regionAction : request.getRegionActionList()) {
2573      if (sum == 0) {
2574        firstRegionName = Bytes.toStringBinary(regionAction.getRegion().getValue().toByteArray());
2575      }
2576      sum += regionAction.getActionCount();
2577    }
2578    if (sum > rowSizeWarnThreshold) {
2579      ld.logBatchWarning(firstRegionName, sum, rowSizeWarnThreshold);
2580    }
2581  }
2582
2583  /**
2584   * Execute multiple actions on a table: get, mutate, and/or execCoprocessor
2585   *
2586   * @param rpcc the RPC controller
2587   * @param request the multi request
2588   * @throws ServiceException
2589   */
2590  @Override
2591  public MultiResponse multi(final RpcController rpcc, final MultiRequest request)
2592  throws ServiceException {
2593    try {
2594      checkOpen();
2595    } catch (IOException ie) {
2596      throw new ServiceException(ie);
2597    }
2598
2599    checkBatchSizeAndLogLargeSize(request);
2600
2601    // rpc controller is how we bring in data via the back door;  it is unprotobuf'ed data.
2602    // It is also the conduit via which we pass back data.
2603    HBaseRpcController controller = (HBaseRpcController)rpcc;
2604    CellScanner cellScanner = controller != null ? controller.cellScanner(): null;
2605    if (controller != null) {
2606      controller.setCellScanner(null);
2607    }
2608
2609    long nonceGroup = request.hasNonceGroup() ? request.getNonceGroup() : HConstants.NO_NONCE;
2610
2611    // this will contain all the cells that we need to return. It's created later, if needed.
2612    List<CellScannable> cellsToReturn = null;
2613    MultiResponse.Builder responseBuilder = MultiResponse.newBuilder();
2614    RegionActionResult.Builder regionActionResultBuilder = RegionActionResult.newBuilder();
2615    Boolean processed = null;
2616    RegionScannersCloseCallBack closeCallBack = null;
2617    RpcCallContext context = RpcServer.getCurrentCall().orElse(null);
2618    this.rpcMultiRequestCount.increment();
2619    this.requestCount.increment();
2620    Map<RegionSpecifier, ClientProtos.RegionLoadStats> regionStats = new HashMap<>(request
2621      .getRegionActionCount());
2622    ActivePolicyEnforcement spaceQuotaEnforcement = getSpaceQuotaManager().getActiveEnforcements();
2623    for (RegionAction regionAction : request.getRegionActionList()) {
2624      OperationQuota quota;
2625      HRegion region;
2626      regionActionResultBuilder.clear();
2627      RegionSpecifier regionSpecifier = regionAction.getRegion();
2628      try {
2629        region = getRegion(regionSpecifier);
2630        quota = getRpcQuotaManager().checkQuota(region, regionAction.getActionList());
2631      } catch (IOException e) {
2632        rpcServer.getMetrics().exception(e);
2633        regionActionResultBuilder.setException(ResponseConverter.buildException(e));
2634        responseBuilder.addRegionActionResult(regionActionResultBuilder.build());
2635        // All Mutations in this RegionAction not executed as we can not see the Region online here
2636        // in this RS. Will be retried from Client. Skipping all the Cells in CellScanner
2637        // corresponding to these Mutations.
2638        skipCellsForMutations(regionAction.getActionList(), cellScanner);
2639        continue;  // For this region it's a failure.
2640      }
2641
2642      if (regionAction.hasAtomic() && regionAction.getAtomic()) {
2643        // How does this call happen?  It may need some work to play well w/ the surroundings.
2644        // Need to return an item per Action along w/ Action index.  TODO.
2645        try {
2646          if (request.hasCondition()) {
2647            Condition condition = request.getCondition();
2648            byte[] row = condition.getRow().toByteArray();
2649            byte[] family = condition.getFamily().toByteArray();
2650            byte[] qualifier = condition.getQualifier().toByteArray();
2651            CompareOperator op =
2652              CompareOperator.valueOf(condition.getCompareType().name());
2653            ByteArrayComparable comparator =
2654                ProtobufUtil.toComparator(condition.getComparator());
2655            TimeRange timeRange = condition.hasTimeRange() ?
2656              ProtobufUtil.toTimeRange(condition.getTimeRange()) :
2657              TimeRange.allTime();
2658            processed =
2659              checkAndRowMutate(region, regionAction.getActionList(), cellScanner, row, family,
2660                qualifier, op, comparator, timeRange, regionActionResultBuilder,
2661                spaceQuotaEnforcement);
2662          } else {
2663            doAtomicBatchOp(regionActionResultBuilder, region, quota, regionAction.getActionList(),
2664              cellScanner, spaceQuotaEnforcement);
2665            processed = Boolean.TRUE;
2666          }
2667        } catch (IOException e) {
2668          rpcServer.getMetrics().exception(e);
2669          // As it's atomic, we may expect it's a global failure.
2670          regionActionResultBuilder.setException(ResponseConverter.buildException(e));
2671        }
2672      } else {
2673        // doNonAtomicRegionMutation manages the exception internally
2674        if (context != null && closeCallBack == null) {
2675          // An RpcCallBack that creates a list of scanners that needs to perform callBack
2676          // operation on completion of multiGets.
2677          // Set this only once
2678          closeCallBack = new RegionScannersCloseCallBack();
2679          context.setCallBack(closeCallBack);
2680        }
2681        cellsToReturn = doNonAtomicRegionMutation(region, quota, regionAction, cellScanner,
2682            regionActionResultBuilder, cellsToReturn, nonceGroup, closeCallBack, context,
2683            spaceQuotaEnforcement);
2684      }
2685      responseBuilder.addRegionActionResult(regionActionResultBuilder.build());
2686      quota.close();
2687      ClientProtos.RegionLoadStats regionLoadStats = region.getLoadStatistics();
2688      if(regionLoadStats != null) {
2689        regionStats.put(regionSpecifier, regionLoadStats);
2690      }
2691    }
2692    // Load the controller with the Cells to return.
2693    if (cellsToReturn != null && !cellsToReturn.isEmpty() && controller != null) {
2694      controller.setCellScanner(CellUtil.createCellScanner(cellsToReturn));
2695    }
2696
2697    if (processed != null) {
2698      responseBuilder.setProcessed(processed);
2699    }
2700
2701    MultiRegionLoadStats.Builder builder = MultiRegionLoadStats.newBuilder();
2702    for(Entry<RegionSpecifier, ClientProtos.RegionLoadStats> stat: regionStats.entrySet()){
2703      builder.addRegion(stat.getKey());
2704      builder.addStat(stat.getValue());
2705    }
2706    responseBuilder.setRegionStatistics(builder);
2707    return responseBuilder.build();
2708  }
2709
2710  private void skipCellsForMutations(List<Action> actions, CellScanner cellScanner) {
2711    if (cellScanner == null) {
2712      return;
2713    }
2714    for (Action action : actions) {
2715      skipCellsForMutation(action, cellScanner);
2716    }
2717  }
2718
2719  private void skipCellsForMutation(Action action, CellScanner cellScanner) {
2720    if (cellScanner == null) {
2721      return;
2722    }
2723    try {
2724      if (action.hasMutation()) {
2725        MutationProto m = action.getMutation();
2726        if (m.hasAssociatedCellCount()) {
2727          for (int i = 0; i < m.getAssociatedCellCount(); i++) {
2728            cellScanner.advance();
2729          }
2730        }
2731      }
2732    } catch (IOException e) {
2733      // No need to handle these Individual Muatation level issue. Any way this entire RegionAction
2734      // marked as failed as we could not see the Region here. At client side the top level
2735      // RegionAction exception will be considered first.
2736      LOG.error("Error while skipping Cells in CellScanner for invalid Region Mutations", e);
2737    }
2738  }
2739
2740  /**
2741   * Mutate data in a table.
2742   *
2743   * @param rpcc the RPC controller
2744   * @param request the mutate request
2745   */
2746  @Override
2747  public MutateResponse mutate(final RpcController rpcc,
2748      final MutateRequest request) throws ServiceException {
2749    // rpc controller is how we bring in data via the back door;  it is unprotobuf'ed data.
2750    // It is also the conduit via which we pass back data.
2751    HBaseRpcController controller = (HBaseRpcController)rpcc;
2752    CellScanner cellScanner = controller != null ? controller.cellScanner() : null;
2753    OperationQuota quota = null;
2754    RpcCallContext context = RpcServer.getCurrentCall().orElse(null);
2755    ActivePolicyEnforcement spaceQuotaEnforcement = null;
2756    MutationType type = null;
2757    HRegion region = null;
2758    long before = EnvironmentEdgeManager.currentTime();
2759    // Clear scanner so we are not holding on to reference across call.
2760    if (controller != null) {
2761      controller.setCellScanner(null);
2762    }
2763    try {
2764      checkOpen();
2765      requestCount.increment();
2766      rpcMutateRequestCount.increment();
2767      region = getRegion(request.getRegion());
2768      MutateResponse.Builder builder = MutateResponse.newBuilder();
2769      MutationProto mutation = request.getMutation();
2770      if (!region.getRegionInfo().isMetaRegion()) {
2771        regionServer.cacheFlusher.reclaimMemStoreMemory();
2772      }
2773      long nonceGroup = request.hasNonceGroup() ? request.getNonceGroup() : HConstants.NO_NONCE;
2774      Result r = null;
2775      Boolean processed = null;
2776      type = mutation.getMutateType();
2777
2778      quota = getRpcQuotaManager().checkQuota(region, OperationQuota.OperationType.MUTATE);
2779      spaceQuotaEnforcement = getSpaceQuotaManager().getActiveEnforcements();
2780
2781      switch (type) {
2782        case APPEND:
2783          // TODO: this doesn't actually check anything.
2784          r = append(region, quota, mutation, cellScanner, nonceGroup, spaceQuotaEnforcement);
2785          break;
2786        case INCREMENT:
2787          // TODO: this doesn't actually check anything.
2788          r = increment(region, quota, mutation, cellScanner, nonceGroup, spaceQuotaEnforcement);
2789          break;
2790        case PUT:
2791          Put put = ProtobufUtil.toPut(mutation, cellScanner);
2792          checkCellSizeLimit(region, put);
2793          // Throws an exception when violated
2794          spaceQuotaEnforcement.getPolicyEnforcement(region).check(put);
2795          quota.addMutation(put);
2796          if (request.hasCondition()) {
2797            Condition condition = request.getCondition();
2798            byte[] row = condition.getRow().toByteArray();
2799            byte[] family = condition.getFamily().toByteArray();
2800            byte[] qualifier = condition.getQualifier().toByteArray();
2801            CompareOperator compareOp =
2802              CompareOperator.valueOf(condition.getCompareType().name());
2803            ByteArrayComparable comparator = ProtobufUtil.toComparator(condition.getComparator());
2804            TimeRange timeRange = condition.hasTimeRange() ?
2805              ProtobufUtil.toTimeRange(condition.getTimeRange()) :
2806              TimeRange.allTime();
2807            if (region.getCoprocessorHost() != null) {
2808              processed = region.getCoprocessorHost().preCheckAndPut(row, family, qualifier,
2809                  compareOp, comparator, put);
2810            }
2811            if (processed == null) {
2812              boolean result = region.checkAndMutate(row, family,
2813                qualifier, compareOp, comparator, timeRange, put);
2814              if (region.getCoprocessorHost() != null) {
2815                result = region.getCoprocessorHost().postCheckAndPut(row, family,
2816                  qualifier, compareOp, comparator, put, result);
2817              }
2818              processed = result;
2819            }
2820          } else {
2821            region.put(put);
2822            processed = Boolean.TRUE;
2823          }
2824          break;
2825        case DELETE:
2826          Delete delete = ProtobufUtil.toDelete(mutation, cellScanner);
2827          checkCellSizeLimit(region, delete);
2828          spaceQuotaEnforcement.getPolicyEnforcement(region).check(delete);
2829          quota.addMutation(delete);
2830          if (request.hasCondition()) {
2831            Condition condition = request.getCondition();
2832            byte[] row = condition.getRow().toByteArray();
2833            byte[] family = condition.getFamily().toByteArray();
2834            byte[] qualifier = condition.getQualifier().toByteArray();
2835            CompareOperator op = CompareOperator.valueOf(condition.getCompareType().name());
2836            ByteArrayComparable comparator = ProtobufUtil.toComparator(condition.getComparator());
2837            TimeRange timeRange = condition.hasTimeRange() ?
2838              ProtobufUtil.toTimeRange(condition.getTimeRange()) :
2839              TimeRange.allTime();
2840            if (region.getCoprocessorHost() != null) {
2841              processed = region.getCoprocessorHost().preCheckAndDelete(row, family, qualifier, op,
2842                  comparator, delete);
2843            }
2844            if (processed == null) {
2845              boolean result = region.checkAndMutate(row, family,
2846                qualifier, op, comparator, timeRange, delete);
2847              if (region.getCoprocessorHost() != null) {
2848                result = region.getCoprocessorHost().postCheckAndDelete(row, family,
2849                  qualifier, op, comparator, delete, result);
2850              }
2851              processed = result;
2852            }
2853          } else {
2854            region.delete(delete);
2855            processed = Boolean.TRUE;
2856          }
2857          break;
2858        default:
2859          throw new DoNotRetryIOException("Unsupported mutate type: " + type.name());
2860      }
2861      if (processed != null) {
2862        builder.setProcessed(processed.booleanValue());
2863      }
2864      boolean clientCellBlockSupported = isClientCellBlockSupport(context);
2865      addResult(builder, r, controller, clientCellBlockSupported);
2866      if (clientCellBlockSupported) {
2867        addSize(context, r, null);
2868      }
2869      return builder.build();
2870    } catch (IOException ie) {
2871      regionServer.checkFileSystem();
2872      throw new ServiceException(ie);
2873    } finally {
2874      if (quota != null) {
2875        quota.close();
2876      }
2877      // Update metrics
2878      if (regionServer.metricsRegionServer != null && type != null) {
2879        long after = EnvironmentEdgeManager.currentTime();
2880        switch (type) {
2881        case DELETE:
2882          if (request.hasCondition()) {
2883            regionServer.metricsRegionServer.updateCheckAndDelete(after - before);
2884          } else {
2885            regionServer.metricsRegionServer.updateDelete(
2886                region == null ? null : region.getRegionInfo().getTable(), after - before);
2887          }
2888          break;
2889        case PUT:
2890          if (request.hasCondition()) {
2891            regionServer.metricsRegionServer.updateCheckAndPut(after - before);
2892          } else {
2893            regionServer.metricsRegionServer.updatePut(
2894                region == null ? null : region.getRegionInfo().getTable(),after - before);
2895          }
2896          break;
2897        default:
2898          break;
2899
2900        }
2901      }
2902    }
2903  }
2904
  // This is used to keep compatible with the old client implementation. Consider remove it if we
  // decide to drop the support of the client that still sends close request to a region scanner
  // which has already been exhausted.
  @Deprecated
  private static final IOException SCANNER_ALREADY_CLOSED = new IOException() {

    private static final long serialVersionUID = -4305297078988180130L;

    // This exception is a pre-allocated singleton compared by identity; capturing a stack
    // trace would be both meaningless (it would point here, not at the throw site) and
    // wasted work on every throw, so suppress it.
    @Override
    public synchronized Throwable fillInStackTrace() {
      return this;
    }
  };
2918
  /**
   * Looks up the live scanner identified by the request's scanner id.
   * <p>
   * Throws the shared {@code SCANNER_ALREADY_CLOSED} instance when the scanner was already
   * auto-closed (old clients may still send a close for it), an
   * {@link UnknownScannerException} when the id is unknown, and a
   * {@link NotServingRegionException} when the region instance backing the scanner is no
   * longer the one online on this server — in that case the stale scanner is closed and
   * its lease cancelled before throwing.
   */
  private RegionScannerHolder getRegionScanner(ScanRequest request) throws IOException {
    String scannerName = Long.toString(request.getScannerId());
    RegionScannerHolder rsh = scanners.get(scannerName);
    if (rsh == null) {
      // just ignore the next or close request if scanner does not exist.
      if (closedScanners.getIfPresent(scannerName) != null) {
        throw SCANNER_ALREADY_CLOSED;
      } else {
        LOG.warn("Client tried to access missing scanner " + scannerName);
        throw new UnknownScannerException(
            "Unknown scanner '" + scannerName + "'. This can happen due to any of the following " +
                "reasons: a) Scanner id given is wrong, b) Scanner lease expired because of " +
                "long wait between consecutive client checkins, c) Server may be closing down, " +
                "d) RegionServer restart during upgrade.\nIf the issue is due to reason (b), a " +
                "possible fix would be increasing the value of" +
                "'hbase.client.scanner.timeout.period' configuration.");
      }
    }
    RegionInfo hri = rsh.s.getRegionInfo();
    // Yes, should be the same instance
    if (regionServer.getOnlineRegion(hri.getRegionName()) != rsh.r) {
      String msg = "Region has changed on the scanner " + scannerName + ": regionName="
          + hri.getRegionNameAsString() + ", scannerRegionName=" + rsh.r;
      LOG.warn(msg + ", closing...");
      scanners.remove(scannerName);
      try {
        rsh.s.close();
      } catch (IOException e) {
        LOG.warn("Getting exception closing " + scannerName, e);
      } finally {
        // Cancel the lease even if closing the scanner itself failed.
        try {
          regionServer.leases.cancelLease(scannerName);
        } catch (LeaseException e) {
          LOG.warn("Getting exception closing " + scannerName, e);
        }
      }
      throw new NotServingRegionException(msg);
    }
    return rsh;
  }
2959
2960  private RegionScannerHolder newRegionScanner(ScanRequest request, ScanResponse.Builder builder)
2961      throws IOException {
2962    HRegion region = getRegion(request.getRegion());
2963    ClientProtos.Scan protoScan = request.getScan();
2964    boolean isLoadingCfsOnDemandSet = protoScan.hasLoadColumnFamiliesOnDemand();
2965    Scan scan = ProtobufUtil.toScan(protoScan);
2966    // if the request doesn't set this, get the default region setting.
2967    if (!isLoadingCfsOnDemandSet) {
2968      scan.setLoadColumnFamiliesOnDemand(region.isLoadingCfsOnDemandDefault());
2969    }
2970
2971    if (!scan.hasFamilies()) {
2972      // Adding all families to scanner
2973      for (byte[] family : region.getTableDescriptor().getColumnFamilyNames()) {
2974        scan.addFamily(family);
2975      }
2976    }
2977    if (region.getCoprocessorHost() != null) {
2978      // preScannerOpen is not allowed to return a RegionScanner. Only post hook can create a
2979      // wrapper for the core created RegionScanner
2980      region.getCoprocessorHost().preScannerOpen(scan);
2981    }
2982    RegionScannerImpl coreScanner = region.getScanner(scan);
2983    Shipper shipper = coreScanner;
2984    RegionScanner scanner = coreScanner;
2985    if (region.getCoprocessorHost() != null) {
2986      scanner = region.getCoprocessorHost().postScannerOpen(scan, scanner);
2987    }
2988    long scannerId = scannerIdGenerator.generateNewScannerId();
2989    builder.setScannerId(scannerId);
2990    builder.setMvccReadPoint(scanner.getMvccReadPoint());
2991    builder.setTtl(scannerLeaseTimeoutPeriod);
2992    String scannerName = String.valueOf(scannerId);
2993    return addScanner(scannerName, scanner, shipper, region, scan.isNeedCursorResult());
2994  }
2995
2996  private void checkScanNextCallSeq(ScanRequest request, RegionScannerHolder rsh)
2997      throws OutOfOrderScannerNextException {
2998    // if nextCallSeq does not match throw Exception straight away. This needs to be
2999    // performed even before checking of Lease.
3000    // See HBASE-5974
3001    if (request.hasNextCallSeq()) {
3002      long callSeq = request.getNextCallSeq();
3003      if (!rsh.incNextCallSeq(callSeq)) {
3004        throw new OutOfOrderScannerNextException("Expected nextCallSeq: " + rsh.getNextCallSeq()
3005            + " But the nextCallSeq got from client: " + request.getNextCallSeq() + "; request="
3006            + TextFormat.shortDebugString(request));
3007      }
3008    }
3009  }
3010
3011  private void addScannerLeaseBack(Leases.Lease lease) {
3012    try {
3013      regionServer.leases.addLease(lease);
3014    } catch (LeaseStillHeldException e) {
3015      // should not happen as the scanner id is unique.
3016      throw new AssertionError(e);
3017    }
3018  }
3019
3020  private long getTimeLimit(HBaseRpcController controller, boolean allowHeartbeatMessages) {
3021    // Set the time limit to be half of the more restrictive timeout value (one of the
3022    // timeout values must be positive). In the event that both values are positive, the
3023    // more restrictive of the two is used to calculate the limit.
3024    if (allowHeartbeatMessages && (scannerLeaseTimeoutPeriod > 0 || rpcTimeout > 0)) {
3025      long timeLimitDelta;
3026      if (scannerLeaseTimeoutPeriod > 0 && rpcTimeout > 0) {
3027        timeLimitDelta = Math.min(scannerLeaseTimeoutPeriod, rpcTimeout);
3028      } else {
3029        timeLimitDelta = scannerLeaseTimeoutPeriod > 0 ? scannerLeaseTimeoutPeriod : rpcTimeout;
3030      }
3031      if (controller != null && controller.getCallTimeout() > 0) {
3032        timeLimitDelta = Math.min(timeLimitDelta, controller.getCallTimeout());
3033      }
3034      // Use half of whichever timeout value was more restrictive... But don't allow
3035      // the time limit to be less than the allowable minimum (could cause an
3036      // immediatate timeout before scanning any data).
3037      timeLimitDelta = Math.max(timeLimitDelta / 2, minimumScanTimeLimitDelta);
3038      // XXX: Can not use EnvironmentEdge here because TestIncrementTimeRange use a
3039      // ManualEnvironmentEdge. Consider using System.nanoTime instead.
3040      return System.currentTimeMillis() + timeLimitDelta;
3041    }
3042    // Default value of timeLimit is negative to indicate no timeLimit should be
3043    // enforced.
3044    return -1L;
3045  }
3046
3047  private void checkLimitOfRows(int numOfCompleteRows, int limitOfRows, boolean moreRows,
3048      ScannerContext scannerContext, ScanResponse.Builder builder) {
3049    if (numOfCompleteRows >= limitOfRows) {
3050      if (LOG.isTraceEnabled()) {
3051        LOG.trace("Done scanning, limit of rows reached, moreRows: " + moreRows +
3052            " scannerContext: " + scannerContext);
3053      }
3054      builder.setMoreResults(false);
3055    }
3056  }
3057
  /**
   * Does the core work of a scan call: drives the region scanner until the size, time or
   * result-count limits are reached, appends the accumulated Results to {@code results},
   * and records on {@code builder} whether more results remain in the region
   * (moreResultsInRegion), heartbeat/cursor state, and optional scan metrics.
   *
   * @param rsh holder carrying the scanner, its region and partial-row state
   * @param maxQuotaResultSize quota-imposed upper bound on the response cell size
   * @param maxResults maximum number of Result objects to return (scan caching)
   * @param limitOfRows limit on complete rows for this scan; non-positive means no limit
   * @param lastBlock opaque accumulator threaded through addSize for response-size
   *        accounting
   */
  private void scan(HBaseRpcController controller, ScanRequest request, RegionScannerHolder rsh,
      long maxQuotaResultSize, int maxResults, int limitOfRows, List<Result> results,
      ScanResponse.Builder builder, MutableObject lastBlock, RpcCallContext context)
      throws IOException {
    HRegion region = rsh.r;
    RegionScanner scanner = rsh.s;
    long maxResultSize;
    if (scanner.getMaxResultSize() > 0) {
      maxResultSize = Math.min(scanner.getMaxResultSize(), maxQuotaResultSize);
    } else {
      maxResultSize = maxQuotaResultSize;
    }
    // This is cells inside a row. Default size is 10 so if many versions or many cfs,
    // then we'll resize. Resizings show in profiler. Set it higher than 10. For now
    // arbitrary 32. TODO: keep record of general size of results being returned.
    List<Cell> values = new ArrayList<>(32);
    region.startRegionOperation(Operation.SCAN);
    try {
      int numOfResults = 0;
      int numOfCompleteRows = 0;
      long before = EnvironmentEdgeManager.currentTime();
      // Only one request may drive a given scanner at a time; scanners are stateful.
      synchronized (scanner) {
        boolean stale = (region.getRegionInfo().getReplicaId() != 0);
        boolean clientHandlesPartials =
            request.hasClientHandlesPartials() && request.getClientHandlesPartials();
        boolean clientHandlesHeartbeats =
            request.hasClientHandlesHeartbeats() && request.getClientHandlesHeartbeats();

        // On the server side we must ensure that the correct ordering of partial results is
        // returned to the client to allow them to properly reconstruct the partial results.
        // If the coprocessor host is adding to the result list, we cannot guarantee the
        // correct ordering of partial results and so we prevent partial results from being
        // formed.
        boolean serverGuaranteesOrderOfPartials = results.isEmpty();
        boolean allowPartialResults = clientHandlesPartials && serverGuaranteesOrderOfPartials;
        boolean moreRows = false;

        // Heartbeat messages occur when the processing of the ScanRequest is exceeds a
        // certain time threshold on the server. When the time threshold is exceeded, the
        // server stops the scan and sends back whatever Results it has accumulated within
        // that time period (may be empty). Since heartbeat messages have the potential to
        // create partial Results (in the event that the timeout occurs in the middle of a
        // row), we must only generate heartbeat messages when the client can handle both
        // heartbeats AND partials
        boolean allowHeartbeatMessages = clientHandlesHeartbeats && allowPartialResults;

        long timeLimit = getTimeLimit(controller, allowHeartbeatMessages);

        final LimitScope sizeScope =
            allowPartialResults ? LimitScope.BETWEEN_CELLS : LimitScope.BETWEEN_ROWS;
        final LimitScope timeScope =
            allowHeartbeatMessages ? LimitScope.BETWEEN_CELLS : LimitScope.BETWEEN_ROWS;

        boolean trackMetrics = request.hasTrackScanMetrics() && request.getTrackScanMetrics();

        // Configure with limits for this RPC. Set keep progress true since size progress
        // towards size limit should be kept between calls to nextRaw
        ScannerContext.Builder contextBuilder = ScannerContext.newBuilder(true);
        // maxResultSize - either we can reach this much size for all cells(being read) data or sum
        // of heap size occupied by cells(being read). Cell data means its key and value parts.
        contextBuilder.setSizeLimit(sizeScope, maxResultSize, maxResultSize);
        contextBuilder.setBatchLimit(scanner.getBatch());
        contextBuilder.setTimeLimit(timeScope, timeLimit);
        contextBuilder.setTrackMetrics(trackMetrics);
        ScannerContext scannerContext = contextBuilder.build();
        boolean limitReached = false;
        while (numOfResults < maxResults) {
          // Reset the batch progress to 0 before every call to RegionScanner#nextRaw. The
          // batch limit is a limit on the number of cells per Result. Thus, if progress is
          // being tracked (i.e. scannerContext.keepProgress() is true) then we need to
          // reset the batch progress between nextRaw invocations since we don't want the
          // batch progress from previous calls to affect future calls
          scannerContext.setBatchProgress(0);

          // Collect values to be returned here
          moreRows = scanner.nextRaw(values, scannerContext);

          if (!values.isEmpty()) {
            if (limitOfRows > 0) {
              // First we need to check if the last result is partial and we have a row change. If
              // so then we need to increase the numOfCompleteRows.
              if (results.isEmpty()) {
                if (rsh.rowOfLastPartialResult != null &&
                    !CellUtil.matchingRows(values.get(0), rsh.rowOfLastPartialResult)) {
                  numOfCompleteRows++;
                  checkLimitOfRows(numOfCompleteRows, limitOfRows, moreRows, scannerContext,
                    builder);
                }
              } else {
                Result lastResult = results.get(results.size() - 1);
                if (lastResult.mayHaveMoreCellsInRow() &&
                    !CellUtil.matchingRows(values.get(0), lastResult.getRow())) {
                  numOfCompleteRows++;
                  checkLimitOfRows(numOfCompleteRows, limitOfRows, moreRows, scannerContext,
                    builder);
                }
              }
              if (builder.hasMoreResults() && !builder.getMoreResults()) {
                break;
              }
            }
            boolean mayHaveMoreCellsInRow = scannerContext.mayHaveMoreCellsInRow();
            Result r = Result.create(values, null, stale, mayHaveMoreCellsInRow);
            lastBlock.setValue(addSize(context, r, lastBlock.getValue()));
            results.add(r);
            numOfResults++;
            if (!mayHaveMoreCellsInRow && limitOfRows > 0) {
              numOfCompleteRows++;
              checkLimitOfRows(numOfCompleteRows, limitOfRows, moreRows, scannerContext, builder);
              if (builder.hasMoreResults() && !builder.getMoreResults()) {
                break;
              }
            }
          } else if (!moreRows && !results.isEmpty()) {
            // No more cells for the scan here, we need to ensure that the mayHaveMoreCellsInRow of
            // last result is false. Otherwise it's possible that: the first nextRaw returned
            // because BATCH_LIMIT_REACHED (BTW it happen to exhaust all cells of the scan),so the
            // last result's mayHaveMoreCellsInRow will be true. while the following nextRaw will
            // return with moreRows=false, which means moreResultsInRegion would be false, it will
            // be a contradictory state (HBASE-21206).
            int lastIdx = results.size() - 1;
            Result r = results.get(lastIdx);
            if (r.mayHaveMoreCellsInRow()) {
              results.set(lastIdx, Result.create(r.rawCells(), r.getExists(), r.isStale(), false));
            }
          }
          boolean sizeLimitReached = scannerContext.checkSizeLimit(LimitScope.BETWEEN_ROWS);
          boolean timeLimitReached = scannerContext.checkTimeLimit(LimitScope.BETWEEN_ROWS);
          boolean resultsLimitReached = numOfResults >= maxResults;
          limitReached = sizeLimitReached || timeLimitReached || resultsLimitReached;

          if (limitReached || !moreRows) {
            if (LOG.isTraceEnabled()) {
              LOG.trace("Done scanning. limitReached: " + limitReached + " moreRows: " + moreRows
                  + " scannerContext: " + scannerContext);
            }
            // We only want to mark a ScanResponse as a heartbeat message in the event that
            // there are more values to be read server side. If there aren't more values,
            // marking it as a heartbeat is wasteful because the client will need to issue
            // another ScanRequest only to realize that they already have all the values
            if (moreRows && timeLimitReached) {
              // Heartbeat messages occur when the time limit has been reached.
              builder.setHeartbeatMessage(true);
              if (rsh.needCursor) {
                Cell cursorCell = scannerContext.getLastPeekedCell();
                if (cursorCell != null) {
                  builder.setCursor(ProtobufUtil.toCursor(cursorCell));
                }
              }
            }
            break;
          }
          values.clear();
        }
        builder.setMoreResultsInRegion(moreRows);
        // Check to see if the client requested that we track metrics server side. If the
        // client requested metrics, retrieve the metrics from the scanner context.
        if (trackMetrics) {
          Map<String, Long> metrics = scannerContext.getMetrics().getMetricsMap();
          ScanMetrics.Builder metricBuilder = ScanMetrics.newBuilder();
          NameInt64Pair.Builder pairBuilder = NameInt64Pair.newBuilder();

          for (Entry<String, Long> entry : metrics.entrySet()) {
            pairBuilder.setName(entry.getKey());
            pairBuilder.setValue(entry.getValue());
            metricBuilder.addMetrics(pairBuilder.build());
          }

          builder.setScanMetrics(metricBuilder.build());
        }
      }
      long end = EnvironmentEdgeManager.currentTime();
      long responseCellSize = context != null ? context.getResponseCellSize() : 0;
      region.getMetrics().updateScanTime(end - before);
      if (regionServer.metricsRegionServer != null) {
        regionServer.metricsRegionServer.updateScanSize(
            region.getTableDescriptor().getTableName(), responseCellSize);
        regionServer.metricsRegionServer.updateScanTime(
            region.getTableDescriptor().getTableName(), end - before);
      }
    } finally {
      region.closeRegionOperation();
    }
    // coprocessor postNext hook
    if (region.getCoprocessorHost() != null) {
      region.getCoprocessorHost().postScannerNext(scanner, results, maxResults, true);
    }
  }
3247
3248  /**
3249   * Scan data in a table.
3250   *
3251   * @param controller the RPC controller
3252   * @param request the scan request
3253   * @throws ServiceException
3254   */
3255  @Override
3256  public ScanResponse scan(final RpcController controller, final ScanRequest request)
3257      throws ServiceException {
3258    if (controller != null && !(controller instanceof HBaseRpcController)) {
3259      throw new UnsupportedOperationException(
3260          "We only do " + "HBaseRpcControllers! FIX IF A PROBLEM: " + controller);
3261    }
3262    if (!request.hasScannerId() && !request.hasScan()) {
3263      throw new ServiceException(
3264          new DoNotRetryIOException("Missing required input: scannerId or scan"));
3265    }
3266    try {
3267      checkOpen();
3268    } catch (IOException e) {
3269      if (request.hasScannerId()) {
3270        String scannerName = Long.toString(request.getScannerId());
3271        if (LOG.isDebugEnabled()) {
3272          LOG.debug(
3273            "Server shutting down and client tried to access missing scanner " + scannerName);
3274        }
3275        if (regionServer.leases != null) {
3276          try {
3277            regionServer.leases.cancelLease(scannerName);
3278          } catch (LeaseException le) {
3279            // No problem, ignore
3280            if (LOG.isTraceEnabled()) {
3281              LOG.trace("Un-able to cancel lease of scanner. It could already be closed.");
3282            }
3283          }
3284        }
3285      }
3286      throw new ServiceException(e);
3287    }
3288    requestCount.increment();
3289    rpcScanRequestCount.increment();
3290    RegionScannerHolder rsh;
3291    ScanResponse.Builder builder = ScanResponse.newBuilder();
3292    try {
3293      if (request.hasScannerId()) {
3294        // The downstream projects such as AsyncHBase in OpenTSDB need this value. See HBASE-18000
3295        // for more details.
3296        builder.setScannerId(request.getScannerId());
3297        rsh = getRegionScanner(request);
3298      } else {
3299        rsh = newRegionScanner(request, builder);
3300      }
3301    } catch (IOException e) {
3302      if (e == SCANNER_ALREADY_CLOSED) {
3303        // Now we will close scanner automatically if there are no more results for this region but
3304        // the old client will still send a close request to us. Just ignore it and return.
3305        return builder.build();
3306      }
3307      throw new ServiceException(e);
3308    }
3309    HRegion region = rsh.r;
3310    String scannerName = rsh.scannerName;
3311    Leases.Lease lease;
3312    try {
3313      // Remove lease while its being processed in server; protects against case
3314      // where processing of request takes > lease expiration time.
3315      lease = regionServer.leases.removeLease(scannerName);
3316    } catch (LeaseException e) {
3317      throw new ServiceException(e);
3318    }
3319    if (request.hasRenew() && request.getRenew()) {
3320      // add back and return
3321      addScannerLeaseBack(lease);
3322      try {
3323        checkScanNextCallSeq(request, rsh);
3324      } catch (OutOfOrderScannerNextException e) {
3325        throw new ServiceException(e);
3326      }
3327      return builder.build();
3328    }
3329    OperationQuota quota;
3330    try {
3331      quota = getRpcQuotaManager().checkQuota(region, OperationQuota.OperationType.SCAN);
3332    } catch (IOException e) {
3333      addScannerLeaseBack(lease);
3334      throw new ServiceException(e);
3335    }
3336    try {
3337      checkScanNextCallSeq(request, rsh);
3338    } catch (OutOfOrderScannerNextException e) {
3339      addScannerLeaseBack(lease);
3340      throw new ServiceException(e);
3341    }
3342    // Now we have increased the next call sequence. If we give client an error, the retry will
3343    // never success. So we'd better close the scanner and return a DoNotRetryIOException to client
3344    // and then client will try to open a new scanner.
3345    boolean closeScanner = request.hasCloseScanner() ? request.getCloseScanner() : false;
3346    int rows; // this is scan.getCaching
3347    if (request.hasNumberOfRows()) {
3348      rows = request.getNumberOfRows();
3349    } else {
3350      rows = closeScanner ? 0 : 1;
3351    }
3352    RpcCallContext context = RpcServer.getCurrentCall().orElse(null);
3353    // now let's do the real scan.
3354    long maxQuotaResultSize = Math.min(maxScannerResultSize, quota.getReadAvailable());
3355    RegionScanner scanner = rsh.s;
3356    // this is the limit of rows for this scan, if we the number of rows reach this value, we will
3357    // close the scanner.
3358    int limitOfRows;
3359    if (request.hasLimitOfRows()) {
3360      limitOfRows = request.getLimitOfRows();
3361    } else {
3362      limitOfRows = -1;
3363    }
3364    MutableObject<Object> lastBlock = new MutableObject<>();
3365    boolean scannerClosed = false;
3366    try {
3367      List<Result> results = new ArrayList<>();
3368      if (rows > 0) {
3369        boolean done = false;
3370        // Call coprocessor. Get region info from scanner.
3371        if (region.getCoprocessorHost() != null) {
3372          Boolean bypass = region.getCoprocessorHost().preScannerNext(scanner, results, rows);
3373          if (!results.isEmpty()) {
3374            for (Result r : results) {
3375              lastBlock.setValue(addSize(context, r, lastBlock.getValue()));
3376            }
3377          }
3378          if (bypass != null && bypass.booleanValue()) {
3379            done = true;
3380          }
3381        }
3382        if (!done) {
3383          scan((HBaseRpcController) controller, request, rsh, maxQuotaResultSize, rows, limitOfRows,
3384            results, builder, lastBlock, context);
3385        } else {
3386          builder.setMoreResultsInRegion(!results.isEmpty());
3387        }
3388      } else {
3389        // This is a open scanner call with numberOfRow = 0, so set more results in region to true.
3390        builder.setMoreResultsInRegion(true);
3391      }
3392
3393      quota.addScanResult(results);
3394      addResults(builder, results, (HBaseRpcController) controller,
3395        RegionReplicaUtil.isDefaultReplica(region.getRegionInfo()),
3396        isClientCellBlockSupport(context));
3397      if (scanner.isFilterDone() && results.isEmpty()) {
3398        // If the scanner's filter - if any - is done with the scan
3399        // only set moreResults to false if the results is empty. This is used to keep compatible
3400        // with the old scan implementation where we just ignore the returned results if moreResults
3401        // is false. Can remove the isEmpty check after we get rid of the old implementation.
3402        builder.setMoreResults(false);
3403      }
3404      // Later we may close the scanner depending on this flag so here we need to make sure that we
3405      // have already set this flag.
3406      assert builder.hasMoreResultsInRegion();
3407      // we only set moreResults to false in the above code, so set it to true if we haven't set it
3408      // yet.
3409      if (!builder.hasMoreResults()) {
3410        builder.setMoreResults(true);
3411      }
3412      if (builder.getMoreResults() && builder.getMoreResultsInRegion() && !results.isEmpty()) {
3413        // Record the last cell of the last result if it is a partial result
3414        // We need this to calculate the complete rows we have returned to client as the
3415        // mayHaveMoreCellsInRow is true does not mean that there will be extra cells for the
3416        // current row. We may filter out all the remaining cells for the current row and just
3417        // return the cells of the nextRow when calling RegionScanner.nextRaw. So here we need to
3418        // check for row change.
3419        Result lastResult = results.get(results.size() - 1);
3420        if (lastResult.mayHaveMoreCellsInRow()) {
3421          rsh.rowOfLastPartialResult = lastResult.getRow();
3422        } else {
3423          rsh.rowOfLastPartialResult = null;
3424        }
3425      }
3426      if (!builder.getMoreResults() || !builder.getMoreResultsInRegion() || closeScanner) {
3427        scannerClosed = true;
3428        closeScanner(region, scanner, scannerName, context);
3429      }
3430      return builder.build();
3431    } catch (IOException e) {
3432      try {
3433        // scanner is closed here
3434        scannerClosed = true;
3435        // The scanner state might be left in a dirty state, so we will tell the Client to
3436        // fail this RPC and close the scanner while opening up another one from the start of
3437        // row that the client has last seen.
3438        closeScanner(region, scanner, scannerName, context);
3439
3440        // If it is a DoNotRetryIOException already, throw as it is. Unfortunately, DNRIOE is
3441        // used in two different semantics.
3442        // (1) The first is to close the client scanner and bubble up the exception all the way
3443        // to the application. This is preferred when the exception is really un-recoverable
3444        // (like CorruptHFileException, etc). Plain DoNotRetryIOException also falls into this
3445        // bucket usually.
3446        // (2) Second semantics is to close the current region scanner only, but continue the
3447        // client scanner by overriding the exception. This is usually UnknownScannerException,
3448        // OutOfOrderScannerNextException, etc where the region scanner has to be closed, but the
3449        // application-level ClientScanner has to continue without bubbling up the exception to
3450        // the client. See ClientScanner code to see how it deals with these special exceptions.
3451        if (e instanceof DoNotRetryIOException) {
3452          throw e;
3453        }
3454
3455        // If it is a FileNotFoundException, wrap as a
3456        // DoNotRetryIOException. This can avoid the retry in ClientScanner.
3457        if (e instanceof FileNotFoundException) {
3458          throw new DoNotRetryIOException(e);
3459        }
3460
3461        // We closed the scanner already. Instead of throwing the IOException, and client
3462        // retrying with the same scannerId only to get USE on the next RPC, we directly throw
3463        // a special exception to save an RPC.
3464        if (VersionInfoUtil.hasMinimumVersion(context.getClientVersionInfo(), 1, 4)) {
3465          // 1.4.0+ clients know how to handle
3466          throw new ScannerResetException("Scanner is closed on the server-side", e);
3467        } else {
3468          // older clients do not know about SRE. Just throw USE, which they will handle
3469          throw new UnknownScannerException("Throwing UnknownScannerException to reset the client"
3470              + " scanner state for clients older than 1.3.", e);
3471        }
3472      } catch (IOException ioe) {
3473        throw new ServiceException(ioe);
3474      }
3475    } finally {
3476      if (!scannerClosed) {
3477        // Adding resets expiration time on lease.
3478        // the closeCallBack will be set in closeScanner so here we only care about shippedCallback
3479        if (context != null) {
3480          context.setCallBack(rsh.shippedCallback);
3481        } else {
3482          // When context != null, adding back the lease will be done in callback set above.
3483          addScannerLeaseBack(lease);
3484        }
3485      }
3486      quota.close();
3487    }
3488  }
3489
3490  private void closeScanner(HRegion region, RegionScanner scanner, String scannerName,
3491      RpcCallContext context) throws IOException {
3492    if (region.getCoprocessorHost() != null) {
3493      if (region.getCoprocessorHost().preScannerClose(scanner)) {
3494        // bypass the actual close.
3495        return;
3496      }
3497    }
3498    RegionScannerHolder rsh = scanners.remove(scannerName);
3499    if (rsh != null) {
3500      if (context != null) {
3501        context.setCallBack(rsh.closeCallBack);
3502      } else {
3503        rsh.s.close();
3504      }
3505      if (region.getCoprocessorHost() != null) {
3506        region.getCoprocessorHost().postScannerClose(scanner);
3507      }
3508      closedScanners.put(scannerName, scannerName);
3509    }
3510  }
3511
3512  @Override
3513  public CoprocessorServiceResponse execRegionServerService(RpcController controller,
3514      CoprocessorServiceRequest request) throws ServiceException {
3515    rpcPreCheck("execRegionServerService");
3516    return regionServer.execRegionServerService(controller, request);
3517  }
3518
3519  @Override
3520  public UpdateConfigurationResponse updateConfiguration(
3521      RpcController controller, UpdateConfigurationRequest request)
3522      throws ServiceException {
3523    try {
3524      this.regionServer.updateConfiguration();
3525    } catch (Exception e) {
3526      throw new ServiceException(e);
3527    }
3528    return UpdateConfigurationResponse.getDefaultInstance();
3529  }
3530
3531  @Override
3532  public GetSpaceQuotaSnapshotsResponse getSpaceQuotaSnapshots(
3533      RpcController controller, GetSpaceQuotaSnapshotsRequest request) throws ServiceException {
3534    try {
3535      final RegionServerSpaceQuotaManager manager =
3536          regionServer.getRegionServerSpaceQuotaManager();
3537      final GetSpaceQuotaSnapshotsResponse.Builder builder =
3538          GetSpaceQuotaSnapshotsResponse.newBuilder();
3539      if (manager != null) {
3540        final Map<TableName,SpaceQuotaSnapshot> snapshots = manager.copyQuotaSnapshots();
3541        for (Entry<TableName,SpaceQuotaSnapshot> snapshot : snapshots.entrySet()) {
3542          builder.addSnapshots(TableQuotaSnapshot.newBuilder()
3543              .setTableName(ProtobufUtil.toProtoTableName(snapshot.getKey()))
3544              .setSnapshot(SpaceQuotaSnapshot.toProtoSnapshot(snapshot.getValue()))
3545              .build());
3546        }
3547      }
3548      return builder.build();
3549    } catch (Exception e) {
3550      throw new ServiceException(e);
3551    }
3552  }
3553
3554  @Override
3555  public ClearRegionBlockCacheResponse clearRegionBlockCache(RpcController controller,
3556      ClearRegionBlockCacheRequest request) {
3557    ClearRegionBlockCacheResponse.Builder builder =
3558        ClearRegionBlockCacheResponse.newBuilder();
3559    CacheEvictionStatsBuilder stats = CacheEvictionStats.builder();
3560    List<HRegion> regions = getRegions(request.getRegionList(), stats);
3561    for (HRegion region : regions) {
3562      try {
3563        stats = stats.append(this.regionServer.clearRegionBlockCache(region));
3564      } catch (Exception e) {
3565        stats.addException(region.getRegionInfo().getRegionName(), e);
3566      }
3567    }
3568    stats.withMaxCacheSize(regionServer.getCacheConfig().getBlockCache().getMaxSize());
3569    return builder.setStats(ProtobufUtil.toCacheEvictionStats(stats.build())).build();
3570  }
3571
3572  @Override
3573  @QosPriority(priority = HConstants.ADMIN_QOS)
3574  public ExecuteProceduresResponse executeProcedures(RpcController controller,
3575      ExecuteProceduresRequest request) throws ServiceException {
3576    try {
3577      checkOpen();
3578      regionServer.getRegionServerCoprocessorHost().preExecuteProcedures();
3579      if (request.getOpenRegionCount() > 0) {
3580        for (OpenRegionRequest req : request.getOpenRegionList()) {
3581          openRegion(controller, req);
3582        }
3583      }
3584      if (request.getCloseRegionCount() > 0) {
3585        for (CloseRegionRequest req : request.getCloseRegionList()) {
3586          closeRegion(controller, req);
3587        }
3588      }
3589      if (request.getProcCount() > 0) {
3590        for (RemoteProcedureRequest req : request.getProcList()) {
3591          RSProcedureCallable callable;
3592          try {
3593            callable = Class.forName(req.getProcClass()).asSubclass(RSProcedureCallable.class)
3594              .getDeclaredConstructor().newInstance();
3595          } catch (Exception e) {
3596            regionServer.remoteProcedureComplete(req.getProcId(), e);
3597            continue;
3598          }
3599          callable.init(req.getProcData().toByteArray(), regionServer);
3600          regionServer.executeProcedure(req.getProcId(), callable);
3601        }
3602      }
3603      regionServer.getRegionServerCoprocessorHost().postExecuteProcedures();
3604      return ExecuteProceduresResponse.getDefaultInstance();
3605    } catch (IOException e) {
3606      throw new ServiceException(e);
3607    }
3608  }
3609
3610  @VisibleForTesting
3611  public RpcScheduler getRpcScheduler() {
3612    return rpcServer.getScheduler();
3613  }
3614}