/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to you under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.quotas;

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.apache.hadoop.util.StringUtils;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.HashMultimap;
import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile;

/**
 * Tracks file archiving and updates the hbase quota table.
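 *
 * <p>A usage sketch (assumptions: in practice, instances are typically obtained through a
 * {@code FileArchiverNotifierFactory} rather than constructed directly, and
 * {@code snapshotNames} / {@code archivedFileSizes} below are illustrative names):
 *
 * <pre>
 * FileArchiverNotifier notifier = new FileArchiverNotifierImpl(conn, conf, fs, tableName);
 * // Full recomputation: persist the size of every snapshot against the table
 * long totalSnapshotSize = notifier.computeAndStoreSnapshotSizes(snapshotNames);
 * // Incremental update: attribute newly archived files to the snapshots that reference them
 * notifier.addArchivedFiles(archivedFileSizes.entrySet());
 * </pre>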
 */
@InterfaceAudience.Private
public class FileArchiverNotifierImpl implements FileArchiverNotifier {
  private static final Logger LOG = LoggerFactory.getLogger(FileArchiverNotifierImpl.class);
  private final Connection conn;
  private final Configuration conf;
  private final FileSystem fs;
  private final TableName tn;
  private final ReadLock readLock;
  private final WriteLock writeLock;
  private volatile long lastFullCompute = Long.MIN_VALUE;
  private List<String> currentSnapshots = Collections.emptyList();
  private static final Map<String,Object> NAMESPACE_LOCKS = new HashMap<>();

  /**
   * An exception thrown when snapshot size updates to the hbase:quota table fail to be written.
   */
  @InterfaceAudience.Private
  public static class QuotaSnapshotSizeSerializationException extends IOException {
    private static final long serialVersionUID = 1L;

    public QuotaSnapshotSizeSerializationException(String msg) {
      super(msg);
    }
  }

  public FileArchiverNotifierImpl(
      Connection conn, Configuration conf, FileSystem fs, TableName tn) {
    this.conn = conn;
    this.conf = conf;
    this.fs = fs;
    this.tn = tn;
    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    readLock = lock.readLock();
    writeLock = lock.writeLock();
  }

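  /**
   * Returns a process-wide lock object for the given namespace, creating one on first use.
   * Callers serialize their read-modify-write cycles against the quota table for a namespace
   * by synchronizing on the returned object.
   */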
  static synchronized Object getLockForNamespace(String namespace) {
    return NAMESPACE_LOCKS.computeIfAbsent(namespace, (ns) -> new Object());
  }

  /**
   * Returns the last time a full computation completed, as a strictly-increasing measure of
   * time extracted from {@link System#nanoTime()}.
   */
  long getLastFullCompute() {
    return lastFullCompute;
  }

  @Override
  public void addArchivedFiles(Set<Entry<String, Long>> fileSizes) throws IOException {
    long start = System.nanoTime();
    readLock.lock();
    try {
      // We want to catch the case where we got an archival request, but there was a full
      // re-computation in progress that was blocking us. Most likely, the full computation is going
      // to already include the changes we were going to make.
      //
      // Same as "start < lastFullCompute" but avoiding numeric overflow per the
      // System.nanoTime() javadoc
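      // (for example, if lastFullCompute sits near Long.MAX_VALUE and start has wrapped
      // negative, "start - lastFullCompute" overflows to a positive value, correctly marking
      // start as later, while "start < lastFullCompute" would wrongly report it as earlier)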
      if (lastFullCompute != Long.MIN_VALUE && start - lastFullCompute < 0) {
        if (LOG.isTraceEnabled()) {
          LOG.trace("A full computation was performed after this request was received."
              + " Ignoring requested updates: " + fileSizes);
        }
        return;
      }

      if (LOG.isTraceEnabled()) {
137        LOG.trace("currentSnapshots: " + currentSnapshots + " fileSize: "+ fileSizes);
      }

      // Write increment to quota table for the correct snapshot. Only do this if we have snapshots
      // and some files that were archived.
      if (!currentSnapshots.isEmpty() && !fileSizes.isEmpty()) {
        // Bucket the archived files by the snapshots that reference them and record the sizes
        groupArchivedFilesBySnapshotAndRecordSize(currentSnapshots, fileSizes);
      }
    } finally {
      readLock.unlock();
    }
  }

  /**
   * For each file in the map, this updates the first snapshot (by lexicographic snapshot name)
   * that references the file. The result of this computation is serialized to the quota table.
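   *
   * <p>For example (a sketch): if {@code snapshots} is {@code ["s1", "s2"]} and an archived
   * 1024-byte file is referenced by both snapshots, all 1024 bytes are attributed to
   * {@code "s1"}, the lexicographically-first snapshot, and none to {@code "s2"}.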
   *
   * @param snapshots A collection of HBase snapshots to group the files into
   * @param fileSizes A map of file names to their sizes
   */
  void groupArchivedFilesBySnapshotAndRecordSize(
      List<String> snapshots, Set<Entry<String, Long>> fileSizes) throws IOException {
    // Make a copy as we'll modify it.
    final Map<String,Long> filesToUpdate = new HashMap<>(fileSizes.size());
    for (Entry<String,Long> entry : fileSizes) {
      filesToUpdate.put(entry.getKey(), entry.getValue());
    }
    // Track the change in size to each snapshot
    final Map<String,Long> snapshotSizeChanges = new HashMap<>();
    for (String snapshot : snapshots) {
      // For each file in `filesToUpdate`, check if `snapshot` refers to it.
      // If `snapshot` does, remove it from `filesToUpdate` and add it to `snapshotSizeChanges`.
      bucketFilesToSnapshot(snapshot, filesToUpdate, snapshotSizeChanges);
      if (filesToUpdate.isEmpty()) {
        // If we have no more files recently archived, we have nothing more to check
        break;
      }
    }
    // Having computed the changes to the snapshot sizes, we now need to record them.
    if (!snapshotSizeChanges.isEmpty()) {
      if (LOG.isTraceEnabled()) {
        LOG.trace("Writing snapshot size changes for: " + snapshotSizeChanges);
      }
      persistSnapshotSizeChanges(snapshotSizeChanges);
    }
  }

  /**
   * For the given snapshot, find all files which this {@code snapshotName} references. After a file
   * is found to be referenced by the snapshot, it is removed from {@code filesToUpdate} and
   * {@code snapshotSizeChanges} is updated in concert.
   *
   * @param snapshotName The snapshot to check
   * @param filesToUpdate A mapping of archived files to their size
   * @param snapshotSizeChanges A mapping of snapshots and their change in size
   */
  void bucketFilesToSnapshot(
      String snapshotName, Map<String,Long> filesToUpdate, Map<String,Long> snapshotSizeChanges)
          throws IOException {
    // A quick check to avoid doing work if the caller unnecessarily invoked this method.
    if (filesToUpdate.isEmpty()) {
      return;
    }

    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(
        snapshotName, CommonFSUtils.getRootDir(conf));
    SnapshotDescription sd = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
    SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, sd);
    // For each region referenced by the snapshot
    for (SnapshotRegionManifest rm : manifest.getRegionManifests()) {
      // For each column family in this region
      for (FamilyFiles ff : rm.getFamilyFilesList()) {
        // And each store file in that family
        for (StoreFile sf : ff.getStoreFilesList()) {
          Long valueOrNull = filesToUpdate.remove(sf.getName());
          if (valueOrNull != null) {
            // This storefile was recently archived, we should update this snapshot with its size
            snapshotSizeChanges.merge(snapshotName, valueOrNull, Long::sum);
          }
          // Short-circuit, if we have no more files that were archived, we don't need to iterate
          // over the rest of the snapshot.
          if (filesToUpdate.isEmpty()) {
            return;
          }
        }
      }
    }
  }

  /**
   * Reads the current size for each snapshot to update, generates a new update based on that value,
   * and then writes the new update.
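   *
   * <p>For example (a sketch): if the quota table currently records 2048 bytes for snapshot
   * {@code "s1"} and {@code snapshotSizeChanges} holds {@code {"s1" -> 1024}}, this writes a
   * new size of 3072 bytes for {@code "s1"} and increases the namespace summary row by the
   * total change of 1024 bytes.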
   *
   * @param snapshotSizeChanges A map of snapshot name to size change
   */
  void persistSnapshotSizeChanges(Map<String,Long> snapshotSizeChanges) throws IOException {
    try (Table quotaTable = conn.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)) {
      // Copy the entries into a list to fix the iteration order
236      final List<Entry<String,Long>> snapshotSizeEntries = new ArrayList<>(
237          snapshotSizeChanges.entrySet());
238      // Create the Gets for each snapshot we need to update
239      final List<Get> snapshotSizeGets = snapshotSizeEntries.stream()
240          .map((e) -> QuotaTableUtil.makeGetForSnapshotSize(tn, e.getKey()))
241          .collect(Collectors.toList());
242      final Iterator<Entry<String,Long>> iterator = snapshotSizeEntries.iterator();
      // A List to store each Put we'll create from the Gets we retrieve
244      final List<Put> updates = new ArrayList<>(snapshotSizeEntries.size());
245
246      // TODO Push this down to the RegionServer with a coprocessor:
247      //
248      // We would really like to piggy-back on the row-lock already being grabbed
249      // to handle the update of the row in the quota table. However, because the value
250      // is a serialized protobuf, the standard Increment API doesn't work for us. With a CP, we
251      // can just send the size deltas to the RS and atomically update the serialized PB object
252      // while relying on the row-lock for synchronization.
253      //
      // Synchronizing on the namespace string is a "minor smell" but passable as this is
      // only invoked via a single caller (the active Master). Using the namespace name lets us
      // have some parallelism without worry of one caller seeing stale data from the quota table.
      synchronized (getLockForNamespace(tn.getNamespaceAsString())) {
        final Result[] existingSnapshotSizes = quotaTable.get(snapshotSizeGets);
        long totalSizeChange = 0;
        // Read the current size values (if they exist) to generate the new value
        for (Result result : existingSnapshotSizes) {
          Entry<String,Long> entry = iterator.next();
          String snapshot = entry.getKey();
          Long size = entry.getValue();
          // Track the total size change for the namespace this table belongs in
          totalSizeChange += size;
          // Get the size of the previous value (or zero)
          long previousSize = getSnapshotSizeFromResult(result);
          // Create an update. A file was archived from the table, so the table's size goes
          // down, but the snapshot's size goes up.
          updates.add(QuotaTableUtil.createPutForSnapshotSize(tn, snapshot, previousSize + size));
        }

        // Create an update for the summation of all snapshots in the namespace
        if (totalSizeChange != 0) {
          long previousSize = getPreviousNamespaceSnapshotSize(
              quotaTable, tn.getNamespaceAsString());
          updates.add(QuotaTableUtil.createPutForNamespaceSnapshotSize(
              tn.getNamespaceAsString(), previousSize + totalSizeChange));
        }

        // Send all of the quota table updates in one batch.
        List<Object> failures = new ArrayList<>();
        final Object[] results = new Object[updates.size()];
        quotaTable.batch(updates, results);
        for (Object result : results) {
          // A null or otherwise non-Result value is an error condition (all RPC attempts failed)
          if (!(result instanceof Result)) {
            failures.add(result);
          }
        }
        // Propagate a failure if any updates failed
        if (!failures.isEmpty()) {
          throw new QuotaSnapshotSizeSerializationException(
              "Failed to write some snapshot size updates: " + failures);
        }
      }
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      return;
    }
  }

  /**
   * Fetches the current size of all snapshots in the given {@code namespace}.
   *
   * @param quotaTable The HBase quota table
   * @param namespace Namespace to fetch the sum of snapshot sizes for
   * @return The sum of all snapshot sizes for the namespace, in bytes.
   */
  long getPreviousNamespaceSnapshotSize(Table quotaTable, String namespace) throws IOException {
    // Read the current sum of the sizes of all snapshots in the namespace
    Result r = quotaTable.get(
        QuotaTableUtil.createGetNamespaceSnapshotSize(namespace));
    return getSnapshotSizeFromResult(r);
  }

  /**
   * Extracts the size component from a serialized {@link SpaceQuotaSnapshot} protobuf.
   *
   * @param r A Result containing one cell with a SpaceQuotaSnapshot protobuf
   * @return The size in bytes of the snapshot.
   */
  long getSnapshotSizeFromResult(Result r) throws InvalidProtocolBufferException {
    // Per javadoc, Result should only be null if an exception was thrown. So, if we're here,
    // it should be non-null. If we can't advance to the first cell, treat that the same as
    // "no cell".
    if (!r.isEmpty() && r.advance()) {
      return QuotaTableUtil.parseSnapshotSize(r.current());
    }
    return 0L;
  }

  @Override
  public long computeAndStoreSnapshotSizes(
      Collection<String> currentSnapshots) throws IOException {
    // Record what the current snapshots are
    this.currentSnapshots = new ArrayList<>(currentSnapshots);
    Collections.sort(this.currentSnapshots);

    // Compute the new size for the table and for each snapshot against that table
341    List<SnapshotWithSize> snapshotSizes = computeSnapshotSizes(this.currentSnapshots);
342    if (LOG.isTraceEnabled()) {
343      LOG.trace("Computed snapshot sizes for " + tn + " of " + snapshotSizes);
344    }
345
346    // Compute the total size of all snapshots against our table
347    final long totalSnapshotSize = snapshotSizes.stream().mapToLong((sws) -> sws.getSize()).sum();
348
349    writeLock.lock();
350    try {
351      // Persist the size of each snapshot
352      try (Table quotaTable = conn.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)) {
353        persistSnapshotSizes(quotaTable, snapshotSizes);
354      }
355
      // Record the last time we did a full recomputation
      lastFullCompute = System.nanoTime();

      return totalSnapshotSize;
    } finally {
      writeLock.unlock();
    }
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder();
    sb.append(getClass().getSimpleName()).append("[");
    sb.append("tableName=").append(tn).append(", currentSnapshots=");
    sb.append(currentSnapshots).append(", lastFullCompute=").append(lastFullCompute);
    return sb.append("]").toString();
  }

  /**
   * Computes the size of each snapshot against the table referenced by {@code this}.
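   *
   * <p>Within the sorted list, a file shared by several snapshots is counted only against the
   * lexicographically-first snapshot that references it, so the returned sizes sum without
   * double-counting shared files.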
   *
   * @param snapshots A sorted list of snapshots against {@code tn}.
   * @return A list of the size for each snapshot against {@code tn}.
   */
380  List<SnapshotWithSize> computeSnapshotSizes(List<String> snapshots) throws IOException {
381    final List<SnapshotWithSize> snapshotSizes = new ArrayList<>(snapshots.size());
382    final Path rootDir = CommonFSUtils.getRootDir(conf);
383
    // Get the set of store file names referenced by the table (the key set of the store file
    // name to store file path map)
385    final Set<String> tableReferencedStoreFiles;
386    try {
387      tableReferencedStoreFiles = FSUtils.getTableStoreFilePathMap(fs, rootDir).keySet();
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      // Propagate the interruption; returning null here would cause an NPE in the caller
      throw (InterruptedIOException) new InterruptedIOException(
          "Interrupted while computing store file paths for " + tn).initCause(e);
    }

    if (LOG.isTraceEnabled()) {
      LOG.trace("Paths for " + tn + ": " + tableReferencedStoreFiles);
    }

    // For each snapshot on this table, get the files which the snapshot references which
    // the table does not.
    Set<String> snapshotReferencedFiles = new HashSet<>();
    for (String snapshotName : snapshots) {
      Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
      SnapshotDescription sd = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
      SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, sd);

      if (LOG.isTraceEnabled()) {
        LOG.trace("Files referenced by other snapshots: " + snapshotReferencedFiles);
      }

      // Get the set of files from the manifest that this snapshot references which are not also
      // referenced by the originating table.
      Set<StoreFileReference> unreferencedStoreFileNames = getStoreFilesFromSnapshot(
          manifest, (sfn) -> !tableReferencedStoreFiles.contains(sfn)
              && !snapshotReferencedFiles.contains(sfn));

      if (LOG.isTraceEnabled()) {
        LOG.trace("Snapshot " + snapshotName + " solely references the files: "
            + unreferencedStoreFileNames);
      }

      // Compute the size of the store files for this snapshot
      long size = getSizeOfStoreFiles(tn, unreferencedStoreFileNames);
      if (LOG.isTraceEnabled()) {
        LOG.trace("Computed size of " + snapshotName + " to be " + size);
      }

      // Persist this snapshot's size into the map
      snapshotSizes.add(new SnapshotWithSize(snapshotName, size));

      // Make sure that we don't double-count the same file
      for (StoreFileReference ref : unreferencedStoreFileNames) {
        for (String fileName : ref.getFamilyToFilesMapping().values()) {
          snapshotReferencedFiles.add(fileName);
        }
      }
    }

    return snapshotSizes;
  }

  /**
   * Computes the size of each store file in {@code storeFileNames}
   */
  long getSizeOfStoreFiles(TableName tn, Set<StoreFileReference> storeFileNames) {
    return storeFileNames.stream()
        .collect(Collectors.summingLong((sfr) -> getSizeOfStoreFile(tn, sfr)));
  }

  /**
   * Computes the size of the store files for a single region.
   */
  long getSizeOfStoreFile(TableName tn, StoreFileReference storeFileName) {
    String regionName = storeFileName.getRegionName();
    return storeFileName.getFamilyToFilesMapping()
        .entries().stream()
        .collect(Collectors.summingLong((e) ->
            getSizeOfStoreFile(tn, regionName, e.getKey(), e.getValue())));
  }

  /**
   * Computes the size of the store file given its name, region and family name in
   * the archive directory.
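   *
   * <p>With the default archive layout, the file is expected at a path like
   * {@code <rootDir>/archive/data/<namespace>/<table>/<region>/<family>/<storeFile>}.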
   */
  long getSizeOfStoreFile(
      TableName tn, String regionName, String family, String storeFile) {
    Path familyArchivePath;
    try {
      familyArchivePath = HFileArchiveUtil.getStoreArchivePath(conf, tn, regionName, family);
    } catch (IOException e) {
      LOG.warn("Could not compute path for the archive directory for the region", e);
      return 0L;
    }
    Path fileArchivePath = new Path(familyArchivePath, storeFile);
    try {
      if (fs.exists(fileArchivePath)) {
        FileStatus[] status = fs.listStatus(fileArchivePath);
        if (1 != status.length) {
          LOG.warn("Expected " + fileArchivePath +
              " to be a file but was a directory, ignoring reference");
          return 0L;
        }
        return status[0].getLen();
      }
    } catch (IOException e) {
      LOG.warn("Could not obtain the status of " + fileArchivePath, e);
      return 0L;
    }
    LOG.warn("Expected " + fileArchivePath + " to exist but does not, ignoring reference.");
    return 0L;
  }

  /**
   * Extracts the names of the store files referenced by this snapshot which satisfy the given
   * predicate (i.e., those for which the predicate returns {@code true}).
   */
495  Set<StoreFileReference> getStoreFilesFromSnapshot(
496      SnapshotManifest manifest, Predicate<String> filter) {
497    Set<StoreFileReference> references = new HashSet<>();
498    // For each region referenced by the snapshot
499    for (SnapshotRegionManifest rm : manifest.getRegionManifests()) {
500      StoreFileReference regionReference = new StoreFileReference(
501          ProtobufUtil.toRegionInfo(rm.getRegionInfo()).getEncodedName());
502
503      // For each column family in this region
504      for (FamilyFiles ff : rm.getFamilyFilesList()) {
505        final String familyName = ff.getFamilyName().toStringUtf8();
506        // And each store file in that family
507        for (StoreFile sf : ff.getStoreFilesList()) {
508          String storeFileName = sf.getName();
          // A snapshot only "inherits" a file's size if it uniquely refers to it (neither the
          // originating table nor any other snapshot references the file).
          if (filter.test(storeFileName)) {
            regionReference.addFamilyStoreFile(familyName, storeFileName);
          }
        }
      }
      // Only add this Region reference if we retained any files.
      if (!regionReference.getFamilyToFilesMapping().isEmpty()) {
        references.add(regionReference);
      }
    }
    return references;
  }

  /**
   * Writes the snapshot sizes to the provided {@code table}.
   */
  void persistSnapshotSizes(
      Table table, List<SnapshotWithSize> snapshotSizes) throws IOException {
    // Convert each entry in the map to a Put and write them to the quota table
    table.put(snapshotSizes
        .stream()
        .map(sws -> QuotaTableUtil.createPutForSnapshotSize(
            tn, sws.getName(), sws.getSize()))
        .collect(Collectors.toList()));
  }

  /**
   * A struct encapsulating the name of a snapshot and its "size" on the filesystem. This size is
   * defined as the amount of filesystem space taken by the files the snapshot refers to which
   * the originating table no longer refers to.
   */
  static class SnapshotWithSize {
    private final String name;
    private final long size;

    SnapshotWithSize(String name, long size) {
      this.name = Objects.requireNonNull(name);
      this.size = size;
    }

    String getName() {
      return name;
    }

    long getSize() {
      return size;
    }

    @Override
    public int hashCode() {
      return new HashCodeBuilder().append(name).append(size).toHashCode();
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) {
        return true;
      }

      if (!(o instanceof SnapshotWithSize)) {
        return false;
      }

      SnapshotWithSize other = (SnapshotWithSize) o;
      return name.equals(other.name) && size == other.size;
    }

    @Override
    public String toString() {
      StringBuilder sb = new StringBuilder(32);
      return sb.append("SnapshotWithSize:[").append(name).append(" ")
          .append(StringUtils.byteDesc(size)).append("]").toString();
    }
  }

  /**
   * A reference to a collection of files in the archive directory for a single region.
   */
  static class StoreFileReference {
    private final String regionName;
    private final Multimap<String,String> familyToFiles;

    StoreFileReference(String regionName) {
      this.regionName = Objects.requireNonNull(regionName);
      familyToFiles = HashMultimap.create();
    }

    String getRegionName() {
      return regionName;
    }

    Multimap<String,String> getFamilyToFilesMapping() {
      return familyToFiles;
    }

    void addFamilyStoreFile(String family, String storeFileName) {
      familyToFiles.put(family, storeFileName);
    }

    @Override
    public int hashCode() {
      return new HashCodeBuilder().append(regionName).append(familyToFiles).toHashCode();
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) {
        return true;
      }
      if (!(o instanceof StoreFileReference)) {
        return false;
      }
      StoreFileReference other = (StoreFileReference) o;
      return regionName.equals(other.regionName) && familyToFiles.equals(other.familyToFiles);
    }

    @Override
    public String toString() {
      StringBuilder sb = new StringBuilder();
      return sb.append("StoreFileReference[region=").append(regionName).append(", files=")
          .append(familyToFiles).append("]").toString();
    }
  }
}