001/*
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018package org.apache.hadoop.hbase.client;
019
020import com.google.protobuf.Descriptors;
021import com.google.protobuf.Message;
022import com.google.protobuf.Service;
023import com.google.protobuf.ServiceException;
024import java.io.Closeable;
025import java.io.IOException;
026import java.util.Collections;
027import java.util.List;
028import java.util.Map;
029import java.util.concurrent.TimeUnit;
030import org.apache.commons.lang3.NotImplementedException;
031import org.apache.hadoop.conf.Configuration;
032import org.apache.hadoop.hbase.Cell;
033import org.apache.hadoop.hbase.CompareOperator;
034import org.apache.hadoop.hbase.HTableDescriptor;
035import org.apache.hadoop.hbase.TableName;
036import org.apache.hadoop.hbase.client.coprocessor.Batch;
037import org.apache.hadoop.hbase.filter.CompareFilter;
038import org.apache.hadoop.hbase.filter.Filter;
039import org.apache.hadoop.hbase.io.TimeRange;
040import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
041import org.apache.hadoop.hbase.util.Bytes;
042import org.apache.yetus.audience.InterfaceAudience;
043
044/**
045 * Used to communicate with a single HBase table. Obtain an instance from a {@link Connection} and
046 * call {@link #close()} afterwards.
047 * <p>
048 * <code>Table</code> can be used to get, put, delete or scan data from a table.
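 * <p>
 * A minimal usage sketch (the table name, column family, qualifier and values are illustrative,
 * and an already-open {@link Connection} named <code>connection</code> is assumed):
 *
 * <pre>
 * <code>
 * try (Table table = connection.getTable(TableName.valueOf("example_table"))) {
 *   Put put = new Put(Bytes.toBytes("row1"));
 *   put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
 *   table.put(put);
 *   Result result = table.get(new Get(Bytes.toBytes("row1")));
 * }
 * </code>
 * </pre>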
049 * @see ConnectionFactory
050 * @see Connection
051 * @see Admin
052 * @see RegionLocator
053 * @since 0.99.0
054 */
055@InterfaceAudience.Public
056public interface Table extends Closeable {
057  /**
058   * Gets the fully qualified table name instance of this table.
059   */
060  TableName getName();
061
062  /**
063   * Returns the {@link org.apache.hadoop.conf.Configuration} object used by this instance.
064   * <p>
065   * The reference returned is not a copy, so any change made to it will affect this instance.
066   */
067  Configuration getConfiguration();
068
069  /**
070   * Gets the {@link org.apache.hadoop.hbase.HTableDescriptor table descriptor} for this table.
071   * @throws java.io.IOException if a remote or network exception occurs.
   * @deprecated since 2.0 version and will be removed in 3.0 version. Use {@link #getDescriptor()}
073   */
074  @Deprecated
075  default HTableDescriptor getTableDescriptor() throws IOException {
076    TableDescriptor descriptor = getDescriptor();
077
078    if (descriptor instanceof HTableDescriptor) {
079      return (HTableDescriptor) descriptor;
080    } else {
081      return new HTableDescriptor(descriptor);
082    }
083  }
084
085  /**
086   * Gets the {@link org.apache.hadoop.hbase.client.TableDescriptor table descriptor} for this
087   * table.
088   * @throws java.io.IOException if a remote or network exception occurs.
089   */
090  TableDescriptor getDescriptor() throws IOException;
091
092  /**
093   * Gets the {@link RegionLocator} for this table.
094   */
095  RegionLocator getRegionLocator() throws IOException;
096
097  /**
098   * Test for the existence of columns in the table, as specified by the Get.
099   * <p>
100   * This will return true if the Get matches one or more keys, false if not.
101   * <p>
   * This is a server-side call so it prevents any data from being transferred to the client.
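   * <p>
   * A minimal usage sketch (the row, family and qualifier are illustrative):
   *
   * <pre>
   * <code>
   * Get get = new Get(Bytes.toBytes("row1"));
   * get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
   * boolean present = table.exists(get);
   * </code>
   * </pre>
   *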
103   * @param get the Get
104   * @return true if the specified Get matches one or more keys, false if not
105   * @throws IOException e
106   */
107  default boolean exists(Get get) throws IOException {
108    return exists(Collections.singletonList(get))[0];
109  }
110
111  /**
112   * Test for the existence of columns in the table, as specified by the Gets.
113   * <p>
114   * This will return an array of booleans. Each value will be true if the related Get matches one
115   * or more keys, false if not.
116   * <p>
117   * This is a server-side call so it prevents any data from being transferred to the client.
118   * @param gets the Gets
119   * @return Array of boolean. True if the specified Get matches one or more keys, false if not.
120   * @throws IOException e
121   */
122  default boolean[] exists(List<Get> gets) throws IOException {
123    throw new NotImplementedException("Add an implementation!");
124  }
125
126  /**
127   * Test for the existence of columns in the table, as specified by the Gets. This will return an
128   * array of booleans. Each value will be true if the related Get matches one or more keys, false
129   * if not. This is a server-side call so it prevents any data from being transferred to the
130   * client.
131   * @param gets the Gets
132   * @return Array of boolean. True if the specified Get matches one or more keys, false if not.
133   * @throws IOException e
134   * @deprecated since 2.0 version and will be removed in 3.0 version. use {@link #exists(List)}
135   */
136  @Deprecated
137  default boolean[] existsAll(List<Get> gets) throws IOException {
138    return exists(gets);
139  }
140
141  /**
142   * Method that does a batch call on Deletes, Gets, Puts, Increments, Appends, RowMutations. The
   * ordering of execution of the actions is not defined. This means that if you do a Put and a Get
   * in the same {@link #batch} call, it is not guaranteed that the Get returns what the Put wrote.
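   * <p>
   * A minimal usage sketch (row, family and qualifier names are illustrative); the results array
   * must be pre-sized to match the action list:
   *
   * <pre>
   * <code>
   * List&lt;Row&gt; actions = new ArrayList&lt;&gt;();
   * actions.add(new Put(Bytes.toBytes("row1")).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"),
   *   Bytes.toBytes("value")));
   * actions.add(new Get(Bytes.toBytes("row2")));
   * Object[] results = new Object[actions.size()];
   * table.batch(actions, results);
   * </code>
   * </pre>
   *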
146   * @param actions list of Get, Put, Delete, Increment, Append, RowMutations.
147   * @param results Empty Object[], same size as actions. Provides access to partial results, in
148   *                case an exception is thrown. A null in the result array means that the call for
149   *                that action failed, even after retries. The order of the objects in the results
150   *                array corresponds to the order of actions in the request list.
151   * @since 0.90.0
152   */
153  default void batch(final List<? extends Row> actions, final Object[] results)
154    throws IOException, InterruptedException {
155    throw new NotImplementedException("Add an implementation!");
156  }
157
158  /**
159   * Same as {@link #batch(List, Object[])}, but with a callback.
160   * @since 0.96.0
161   */
162  default <R> void batchCallback(final List<? extends Row> actions, final Object[] results,
163    final Batch.Callback<R> callback) throws IOException, InterruptedException {
164    throw new NotImplementedException("Add an implementation!");
165  }
166
167  /**
168   * Extracts certain cells from a given row.
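   * <p>
   * A minimal usage sketch (the row, family and qualifier are illustrative):
   *
   * <pre>
   * <code>
   * Get get = new Get(Bytes.toBytes("row1"));
   * Result result = table.get(get);
   * byte[] value = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"));
   * </code>
   * </pre>
   *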
169   * @param get The object that specifies what data to fetch and from which row.
170   * @return The data coming from the specified row, if it exists. If the row specified doesn't
171   *         exist, the {@link Result} instance returned won't contain any
172   *         {@link org.apache.hadoop.hbase.KeyValue}, as indicated by {@link Result#isEmpty()}.
173   * @throws IOException if a remote or network exception occurs.
174   * @since 0.20.0
175   */
176  default Result get(Get get) throws IOException {
177    return get(Collections.singletonList(get))[0];
178  }
179
180  /**
181   * Extracts specified cells from the given rows, as a batch.
182   * @param gets The objects that specify what data to fetch and from which rows.
   * @return The data coming from the specified rows, if they exist. If a specified row doesn't
   *         exist, the corresponding {@link Result} instance returned won't contain any
185   *         {@link org.apache.hadoop.hbase.Cell}s, as indicated by {@link Result#isEmpty()}. If
186   *         there are any failures even after retries, there will be a <code>null</code> in the
187   *         results' array for those Gets, AND an exception will be thrown. The ordering of the
188   *         Result array corresponds to the order of the list of passed in Gets.
189   * @throws IOException if a remote or network exception occurs.
190   * @since 0.90.0
   * @apiNote {@link #put(List)} runs pre-flight validations on the input list on the client.
   *          Currently {@link #get(List)} doesn't run any validations on the client side as there
   *          is no need, but this may change in the future, in which case an
   *          {@link IllegalArgumentException} will be thrown.
195   */
196  default Result[] get(List<Get> gets) throws IOException {
197    throw new NotImplementedException("Add an implementation!");
198  }
199
200  /**
201   * Returns a scanner on the current table as specified by the {@link Scan} object. Note that the
   * passed {@link Scan}'s start row and caching properties may be changed.
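   * <p>
   * A minimal usage sketch (family and qualifier are illustrative); the scanner should be closed,
   * for example with try-with-resources:
   *
   * <pre>
   * <code>
   * Scan scan = new Scan().addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
   * try (ResultScanner scanner = table.getScanner(scan)) {
   *   for (Result result : scanner) {
   *     // process each result
   *   }
   * }
   * </code>
   * </pre>
   *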
203   * @param scan A configured {@link Scan} object.
204   * @return A scanner.
205   * @throws IOException if a remote or network exception occurs.
206   * @since 0.20.0
207   */
208  default ResultScanner getScanner(Scan scan) throws IOException {
209    throw new NotImplementedException("Add an implementation!");
210  }
211
212  /**
213   * Gets a scanner on the current table for the given family.
214   * @param family The column family to scan.
215   * @return A scanner.
216   * @throws IOException if a remote or network exception occurs.
217   * @since 0.20.0
218   */
219  default ResultScanner getScanner(byte[] family) throws IOException {
220    throw new NotImplementedException("Add an implementation!");
221  }
222
223  /**
224   * Gets a scanner on the current table for the given family and qualifier.
225   * @param family    The column family to scan.
226   * @param qualifier The column qualifier to scan.
227   * @return A scanner.
228   * @throws IOException if a remote or network exception occurs.
229   * @since 0.20.0
230   */
231  default ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException {
232    throw new NotImplementedException("Add an implementation!");
233  }
234
235  /**
236   * Puts some data in the table.
237   * @param put The data to put.
238   * @throws IOException if a remote or network exception occurs.
239   * @since 0.20.0
240   */
241  default void put(Put put) throws IOException {
242    put(Collections.singletonList(put));
243  }
244
245  /**
246   * Batch puts the specified data into the table.
247   * <p>
248   * This can be used for group commit, or for submitting user defined batches. Before sending a
249   * batch of mutations to the server, the client runs a few validations on the input list. If an
   * error is found, for example, a mutation was supplied but was missing its column, an
251   * {@link IllegalArgumentException} will be thrown and no mutations will be applied. If there are
252   * any failures even after retries, a {@link RetriesExhaustedWithDetailsException} will be thrown.
253   * RetriesExhaustedWithDetailsException contains lists of failed mutations and corresponding
254   * remote exceptions. The ordering of mutations and exceptions in the encapsulating exception
255   * corresponds to the order of the input list of Put requests.
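   * <p>
   * A minimal usage sketch (row, family and qualifier names are illustrative):
   *
   * <pre>
   * <code>
   * List&lt;Put&gt; puts = new ArrayList&lt;&gt;();
   * for (int i = 0; i &lt; 10; i++) {
   *   Put put = new Put(Bytes.toBytes("row" + i));
   *   put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value" + i));
   *   puts.add(put);
   * }
   * table.put(puts);
   * </code>
   * </pre>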
256   * @param puts The list of mutations to apply.
257   * @throws IOException if a remote or network exception occurs.
258   * @since 0.20.0
259   */
260  default void put(List<Put> puts) throws IOException {
261    throw new NotImplementedException("Add an implementation!");
262  }
263
264  /**
265   * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it
266   * adds the put. If the passed value is null, the check is for the lack of column (ie:
   * non-existence).
268   * @param row       to check
269   * @param family    column family to check
270   * @param qualifier column qualifier to check
271   * @param value     the expected value
272   * @param put       data to put if check succeeds
273   * @throws IOException e
274   * @return true if the new put was executed, false otherwise
275   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
276   */
277  @Deprecated
278  default boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put)
279    throws IOException {
280    return checkAndPut(row, family, qualifier, CompareOperator.EQUAL, value, put);
281  }
282
283  /**
284   * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it
285   * adds the put. If the passed value is null, the check is for the lack of column (ie:
   * non-existence). The expected value argument of this call is on the left and the current value
   * of the cell is on the right side of the comparison operator. For example, a GREATER operator
   * means expected value > existing <=> add the put.
289   * @param row       to check
290   * @param family    column family to check
291   * @param qualifier column qualifier to check
292   * @param compareOp comparison operator to use
293   * @param value     the expected value
294   * @param put       data to put if check succeeds
295   * @throws IOException e
296   * @return true if the new put was executed, false otherwise
297   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
298   */
299  @Deprecated
300  default boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
301    CompareFilter.CompareOp compareOp, byte[] value, Put put) throws IOException {
302    RowMutations mutations = new RowMutations(put.getRow(), 1);
303    mutations.add(put);
304
305    return checkAndMutate(row, family, qualifier, compareOp, value, mutations);
306  }
307
308  /**
309   * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it
310   * adds the put. If the passed value is null, the check is for the lack of column (ie:
   * non-existence). The expected value argument of this call is on the left and the current value
   * of the cell is on the right side of the comparison operator. For example, a GREATER operator
   * means expected value > existing <=> add the put.
314   * @param row       to check
315   * @param family    column family to check
316   * @param qualifier column qualifier to check
317   * @param op        comparison operator to use
318   * @param value     the expected value
319   * @param put       data to put if check succeeds
320   * @throws IOException e
321   * @return true if the new put was executed, false otherwise
322   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
323   */
324  @Deprecated
325  default boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
326    byte[] value, Put put) throws IOException {
327    RowMutations mutations = new RowMutations(put.getRow(), 1);
328    mutations.add(put);
329
330    return checkAndMutate(row, family, qualifier, op, value, mutations);
331  }
332
333  /**
334   * Deletes the specified cells/row.
335   * @param delete The object that specifies what to delete.
336   * @throws IOException if a remote or network exception occurs.
337   * @since 0.20.0
338   */
339  default void delete(Delete delete) throws IOException {
340    throw new NotImplementedException("Add an implementation!");
341  }
342
343  /**
344   * Batch Deletes the specified cells/rows from the table.
345   * <p>
   * If a specified row does not exist, the {@link Delete} is reported as successful; no
347   * exception will be thrown. If there are any failures even after retries, a
348   * {@link RetriesExhaustedWithDetailsException} will be thrown.
349   * RetriesExhaustedWithDetailsException contains lists of failed {@link Delete}s and corresponding
350   * remote exceptions.
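   * <p>
   * A minimal usage sketch (row names are illustrative); note the caveat below about the input
   * list being modified:
   *
   * <pre>
   * <code>
   * List&lt;Delete&gt; deletes = new ArrayList&lt;&gt;();
   * deletes.add(new Delete(Bytes.toBytes("row1")));
   * deletes.add(new Delete(Bytes.toBytes("row2")));
   * table.delete(deletes);
   * </code>
   * </pre>
   *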
351   * @param deletes List of things to delete. The input list gets modified by this method. All
352   *                successfully applied {@link Delete}s in the list are removed (in particular it
353   *                gets re-ordered, so the order in which the elements are inserted in the list
354   *                gives no guarantee as to the order in which the {@link Delete}s are executed).
355   * @throws IOException if a remote or network exception occurs. In that case the {@code deletes}
   *                     argument will contain the {@link Delete} instances that have not been
357   *                     successfully applied.
358   * @since 0.20.1
   * @apiNote In version 3.0.0, the input list {@code deletes} will no longer be modified. Also,
   *          {@link #put(List)} runs pre-flight validations on the input list on the client.
   *          Currently {@link #delete(List)} doesn't run validations on the client as there is no
   *          need, but this may change in the future, in which case an
   *          {@link IllegalArgumentException} will be thrown.
364   */
365  default void delete(List<Delete> deletes) throws IOException {
366    throw new NotImplementedException("Add an implementation!");
367  }
368
369  /**
370   * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it
371   * adds the delete. If the passed value is null, the check is for the lack of column (ie:
   * non-existence).
373   * @param row       to check
374   * @param family    column family to check
375   * @param qualifier column qualifier to check
376   * @param value     the expected value
377   * @param delete    data to delete if check succeeds
378   * @throws IOException e
379   * @return true if the new delete was executed, false otherwise
380   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
381   */
382  @Deprecated
383  default boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, byte[] value,
384    Delete delete) throws IOException {
385    return checkAndDelete(row, family, qualifier, CompareOperator.EQUAL, value, delete);
386  }
387
388  /**
389   * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it
390   * adds the delete. If the passed value is null, the check is for the lack of column (ie:
   * non-existence). The expected value argument of this call is on the left and the current value
   * of the cell is on the right side of the comparison operator. For example, a GREATER operator
   * means expected value > existing <=> add the delete.
394   * @param row       to check
395   * @param family    column family to check
396   * @param qualifier column qualifier to check
397   * @param compareOp comparison operator to use
398   * @param value     the expected value
399   * @param delete    data to delete if check succeeds
400   * @throws IOException e
401   * @return true if the new delete was executed, false otherwise
402   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
403   */
404  @Deprecated
405  default boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
406    CompareFilter.CompareOp compareOp, byte[] value, Delete delete) throws IOException {
407    RowMutations mutations = new RowMutations(delete.getRow(), 1);
408    mutations.add(delete);
409
410    return checkAndMutate(row, family, qualifier, compareOp, value, mutations);
411  }
412
413  /**
414   * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it
415   * adds the delete. If the passed value is null, the check is for the lack of column (ie:
   * non-existence). The expected value argument of this call is on the left and the current value
   * of the cell is on the right side of the comparison operator. For example, a GREATER operator
   * means expected value > existing <=> add the delete.
419   * @param row       to check
420   * @param family    column family to check
421   * @param qualifier column qualifier to check
422   * @param op        comparison operator to use
423   * @param value     the expected value
424   * @param delete    data to delete if check succeeds
425   * @throws IOException e
426   * @return true if the new delete was executed, false otherwise
427   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
428   */
429  @Deprecated
430  default boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
431    byte[] value, Delete delete) throws IOException {
432    RowMutations mutations = new RowMutations(delete.getRow(), 1);
433    mutations.add(delete);
434
435    return checkAndMutate(row, family, qualifier, op, value, mutations);
436  }
437
438  /**
439   * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it
440   * adds the Put/Delete/RowMutations.
441   * <p>
442   * Use the returned {@link CheckAndMutateBuilder} to construct your request and then execute it.
443   * This is a fluent style API, the code is like:
444   *
445   * <pre>
446   * <code>
447   * table.checkAndMutate(row, family).qualifier(qualifier).ifNotExists().thenPut(put);
448   * </code>
449   * </pre>
450   *
451   * @deprecated Since 2.4.0, will be removed in 4.0.0. For internal test use only, do not use it
452   *             any more.
453   */
454  @Deprecated
455  default CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family) {
456    throw new NotImplementedException("Add an implementation!");
457  }
458
459  /**
460   * A helper class for sending checkAndMutate request.
461   * @deprecated Since 2.4.0, will be removed in 4.0.0. For internal test use only, do not use it
462   *             any more.
463   */
464  @Deprecated
465  interface CheckAndMutateBuilder {
466
467    /**
     * Specify a column qualifier.
469     * @param qualifier column qualifier to check.
470     */
471    CheckAndMutateBuilder qualifier(byte[] qualifier);
472
473    /**
     * Specify a timerange.
475     * @param timeRange timeRange to check
476     */
477    CheckAndMutateBuilder timeRange(TimeRange timeRange);
478
479    /**
480     * Check for lack of column.
481     */
482    CheckAndMutateBuilder ifNotExists();
483
484    /**
485     * Check for equality.
486     * @param value the expected value
487     */
488    default CheckAndMutateBuilder ifEquals(byte[] value) {
489      return ifMatches(CompareOperator.EQUAL, value);
490    }
491
492    /**
493     * Check for match.
494     * @param compareOp comparison operator to use
495     * @param value     the expected value
496     */
497    CheckAndMutateBuilder ifMatches(CompareOperator compareOp, byte[] value);
498
499    /**
500     * Specify a Put to commit if the check succeeds.
501     * @param put data to put if check succeeds
502     * @return {@code true} if the new put was executed, {@code false} otherwise.
503     */
504    boolean thenPut(Put put) throws IOException;
505
506    /**
507     * Specify a Delete to commit if the check succeeds.
508     * @param delete data to delete if check succeeds
509     * @return {@code true} if the new delete was executed, {@code false} otherwise.
510     */
511    boolean thenDelete(Delete delete) throws IOException;
512
513    /**
514     * Specify a RowMutations to commit if the check succeeds.
515     * @param mutation mutations to perform if check succeeds
516     * @return true if the new mutation was executed, false otherwise.
517     */
518    boolean thenMutate(RowMutations mutation) throws IOException;
519  }
520
521  /**
522   * Atomically checks if a row matches the specified filter. If it does, it adds the
523   * Put/Delete/RowMutations.
524   * <p>
525   * Use the returned {@link CheckAndMutateWithFilterBuilder} to construct your request and then
526   * execute it. This is a fluent style API, the code is like:
527   *
528   * <pre>
529   * <code>
530   * table.checkAndMutate(row, filter).thenPut(put);
531   * </code>
532   * </pre>
533   *
534   * @deprecated Since 2.4.0, will be removed in 4.0.0. For internal test use only, do not use it
535   *             any more.
536   */
537  @Deprecated
538  default CheckAndMutateWithFilterBuilder checkAndMutate(byte[] row, Filter filter) {
539    throw new NotImplementedException("Add an implementation!");
540  }
541
542  /**
543   * A helper class for sending checkAndMutate request with a filter.
544   * @deprecated Since 2.4.0, will be removed in 4.0.0. For internal test use only, do not use it
545   *             any more.
546   */
547  @Deprecated
548  interface CheckAndMutateWithFilterBuilder {
549
550    /**
551     * Specify a timerange.
552     * @param timeRange timeRange to check
553     */
554    CheckAndMutateWithFilterBuilder timeRange(TimeRange timeRange);
555
556    /**
557     * Specify a Put to commit if the check succeeds.
558     * @param put data to put if check succeeds
559     * @return {@code true} if the new put was executed, {@code false} otherwise.
560     */
561    boolean thenPut(Put put) throws IOException;
562
563    /**
564     * Specify a Delete to commit if the check succeeds.
565     * @param delete data to delete if check succeeds
566     * @return {@code true} if the new delete was executed, {@code false} otherwise.
567     */
568    boolean thenDelete(Delete delete) throws IOException;
569
570    /**
571     * Specify a RowMutations to commit if the check succeeds.
572     * @param mutation mutations to perform if check succeeds
573     * @return true if the new mutation was executed, false otherwise.
574     */
575    boolean thenMutate(RowMutations mutation) throws IOException;
576  }
577
578  /**
579   * checkAndMutate that atomically checks if a row matches the specified condition. If it does, it
580   * performs the specified action.
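   * <p>
   * A minimal usage sketch (row, family, qualifier and values are illustrative):
   *
   * <pre>
   * <code>
   * CheckAndMutate checkAndMutate = CheckAndMutate.newBuilder(Bytes.toBytes("row1"))
   *   .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("expected"))
   *   .build(new Put(Bytes.toBytes("row1")).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"),
   *     Bytes.toBytes("new-value")));
   * CheckAndMutateResult result = table.checkAndMutate(checkAndMutate);
   * boolean success = result.isSuccess();
   * </code>
   * </pre>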
581   * @param checkAndMutate The CheckAndMutate object.
582   * @return A CheckAndMutateResult object that represents the result for the CheckAndMutate.
583   * @throws IOException if a remote or network exception occurs.
584   */
585  default CheckAndMutateResult checkAndMutate(CheckAndMutate checkAndMutate) throws IOException {
586    return checkAndMutate(Collections.singletonList(checkAndMutate)).get(0);
587  }
588
589  /**
590   * Batch version of checkAndMutate. The specified CheckAndMutates are batched only in the sense
591   * that they are sent to a RS in one RPC, but each CheckAndMutate operation is still executed
592   * atomically (and thus, each may fail independently of others).
593   * @param checkAndMutates The list of CheckAndMutate.
594   * @return A list of CheckAndMutateResult objects that represents the result for each
595   *         CheckAndMutate.
596   * @throws IOException if a remote or network exception occurs.
597   */
598  default List<CheckAndMutateResult> checkAndMutate(List<CheckAndMutate> checkAndMutates)
599    throws IOException {
600    throw new NotImplementedException("Add an implementation!");
601  }
602
603  /**
604   * Performs multiple mutations atomically on a single row. Currently {@link Put} and
605   * {@link Delete} are supported.
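   * <p>
   * A minimal usage sketch (row, family and qualifiers are illustrative); both mutations target
   * the same row and are applied atomically:
   *
   * <pre>
   * <code>
   * byte[] row = Bytes.toBytes("row1");
   * RowMutations mutations = new RowMutations(row);
   * mutations.add(new Put(row).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"),
   *   Bytes.toBytes("value")));
   * mutations.add(new Delete(row).addColumns(Bytes.toBytes("cf"), Bytes.toBytes("q2")));
   * table.mutateRow(mutations);
   * </code>
   * </pre>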
606   * @param rm object that specifies the set of mutations to perform atomically
607   * @return results of Increment/Append operations
608   * @throws IOException if a remote or network exception occurs.
609   */
610  default Result mutateRow(final RowMutations rm) throws IOException {
611    throw new NotImplementedException("Add an implementation!");
612  }
613
614  /**
615   * Appends values to one or more columns within a single row.
616   * <p>
   * This operation guarantees atomicity to readers. Appends are done under a single row lock, so
618   * write operations to a row are synchronized, and readers are guaranteed to see this operation
619   * fully completed.
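   * <p>
   * A minimal usage sketch (row, family and qualifier are illustrative):
   *
   * <pre>
   * <code>
   * Append append = new Append(Bytes.toBytes("row1"));
   * append.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("-suffix"));
   * Result result = table.append(append);
   * </code>
   * </pre>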
620   * @param append object that specifies the columns and values to be appended
621   * @throws IOException e
   * @return values of columns after the append operation (may be null)
623   */
624  default Result append(final Append append) throws IOException {
625    throw new NotImplementedException("Add an implementation!");
626  }
627
628  /**
629   * Increments one or more columns within a single row.
630   * <p>
631   * This operation ensures atomicity to readers. Increments are done under a single row lock, so
632   * write operations to a row are synchronized, and readers are guaranteed to see this operation
633   * fully completed.
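   * <p>
   * A minimal usage sketch (row, family and qualifier are illustrative):
   *
   * <pre>
   * <code>
   * Increment increment = new Increment(Bytes.toBytes("row1"));
   * increment.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("counter"), 1L);
   * Result result = table.increment(increment);
   * long newValue = Bytes.toLong(result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("counter")));
   * </code>
   * </pre>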
634   * @param increment object that specifies the columns and amounts to be used for the increment
635   *                  operations
636   * @throws IOException e
637   * @return values of columns after the increment
638   */
639  default Result increment(final Increment increment) throws IOException {
640    throw new NotImplementedException("Add an implementation!");
641  }
642
643  /**
644   * See {@link #incrementColumnValue(byte[], byte[], byte[], long, Durability)}
645   * <p>
646   * The {@link Durability} is defaulted to {@link Durability#SYNC_WAL}.
647   * @param row       The row that contains the cell to increment.
648   * @param family    The column family of the cell to increment.
649   * @param qualifier The column qualifier of the cell to increment.
650   * @param amount    The amount to increment the cell with (or decrement, if the amount is
651   *                  negative).
652   * @return The new value, post increment.
653   * @throws IOException if a remote or network exception occurs.
654   */
655  default long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount)
656    throws IOException {
657    Increment increment = new Increment(row).addColumn(family, qualifier, amount);
658    Cell cell = increment(increment).getColumnLatestCell(family, qualifier);
659    return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
660  }
661
662  /**
663   * Atomically increments a column value. If the column value already exists and is not a
664   * big-endian long, this could throw an exception. If the column value does not yet exist it is
665   * initialized to <code>amount</code> and written to the specified column.
666   * <p>
   * Setting durability to {@link Durability#SKIP_WAL} means that in a failure scenario you will lose
668   * any increments that have not been flushed.
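   * <p>
   * A minimal usage sketch (row, family and qualifier are illustrative):
   *
   * <pre>
   * <code>
   * long newValue = table.incrementColumnValue(Bytes.toBytes("row1"), Bytes.toBytes("cf"),
   *   Bytes.toBytes("counter"), 1L, Durability.SYNC_WAL);
   * </code>
   * </pre>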
669   * @param row        The row that contains the cell to increment.
670   * @param family     The column family of the cell to increment.
671   * @param qualifier  The column qualifier of the cell to increment.
672   * @param amount     The amount to increment the cell with (or decrement, if the amount is
673   *                   negative).
674   * @param durability The persistence guarantee for this increment.
675   * @return The new value, post increment.
676   * @throws IOException if a remote or network exception occurs.
677   */
678  default long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount,
679    Durability durability) throws IOException {
680    Increment increment =
681      new Increment(row).addColumn(family, qualifier, amount).setDurability(durability);
682    Cell cell = increment(increment).getColumnLatestCell(family, qualifier);
683    return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
684  }
685
686  /**
687   * Releases any resources held or pending changes in internal buffers.
688   * @throws IOException if a remote or network exception occurs.
689   */
690  @Override
691  default void close() throws IOException {
692    throw new NotImplementedException("Add an implementation!");
693  }
694
695  /**
696   * Creates and returns a {@link com.google.protobuf.RpcChannel} instance connected to the table
697   * region containing the specified row. The row given does not actually have to exist. Whichever
698   * region would contain the row based on start and end keys will be used. Note that the
699   * {@code row} parameter is also not passed to the coprocessor handler registered for this
700   * protocol, unless the {@code row} is separately passed as an argument in the service request.
701   * The parameter here is only used to locate the region used to handle the call.
702   * <p>
703   * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published
704   * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations:
705   * </p>
706   * <div style="background-color: #cccccc; padding: 2px"> <blockquote>
707   *
708   * <pre>
709   * CoprocessorRpcChannel channel = myTable.coprocessorService(rowkey);
710   * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
711   * MyCallRequest request = MyCallRequest.newBuilder()
712   *     ...
713   *     .build();
714   * MyCallResponse response = service.myCall(null, request);
715   * </pre>
716   *
717   * </blockquote></div>
718   * @param row The row key used to identify the remote region location
719   * @return A CoprocessorRpcChannel instance
720   */
721  default CoprocessorRpcChannel coprocessorService(byte[] row) {
722    throw new NotImplementedException("Add an implementation!");
723  }
724
725  /**
726   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
727   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), and
728   * invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
729   * with each {@link com.google.protobuf.Service} instance.
730   * @param service  the protocol buffer {@code Service} implementation to call
731   * @param startKey start region selection with region containing this row. If {@code null}, the
732   *                 selection will start with the first table region.
733   * @param endKey   select regions up to and including the region containing this row. If
734   *                 {@code null}, selection will continue through the last table region.
735   * @param callable this instance's
736   *                 {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method will
737   *                 be invoked once per table region, using the {@link com.google.protobuf.Service}
738   *                 instance connected to that region.
739   * @param <T>      the {@link com.google.protobuf.Service} subclass to connect to
740   * @param <R>      Return type for the {@code callable} parameter's
741   *                 {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
742   * @return a map of result values keyed by region name
743   */
744  default <T extends Service, R> Map<byte[], R> coprocessorService(final Class<T> service,
745    byte[] startKey, byte[] endKey, final Batch.Call<T, R> callable)
746    throws ServiceException, Throwable {
747    throw new NotImplementedException("Add an implementation!");
748  }
749
750  /**
751   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
752   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), and
753   * invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
754   * with each {@link Service} instance.
755   * <p>
756   * The given
757   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],byte[],Object)}
758   * method will be called with the return value from each region's
759   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} invocation.
760   * </p>
761   * @param service  the protocol buffer {@code Service} implementation to call
762   * @param startKey start region selection with region containing this row. If {@code null}, the
763   *                 selection will start with the first table region.
764   * @param endKey   select regions up to and including the region containing this row. If
765   *                 {@code null}, selection will continue through the last table region.
766   * @param callable this instance's
767   *                 {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method will
768   *                 be invoked once per table region, using the {@link Service} instance connected
769   *                 to that region.
770   * @param <T>      the {@link Service} subclass to connect to
771   * @param <R>      Return type for the {@code callable} parameter's
772   *                 {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
773   */
774  default <T extends Service, R> void coprocessorService(final Class<T> service, byte[] startKey,
775    byte[] endKey, final Batch.Call<T, R> callable, final Batch.Callback<R> callback)
776    throws ServiceException, Throwable {
777    throw new NotImplementedException("Add an implementation!");
778  }
779
780  /**
   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive). All
   * invocations to the same region server will be batched into one call. The coprocessor service is
   * invoked according to the service instance, method name and parameters.
   * @param methodDescriptor  the descriptor for the protobuf service method to call.
   * @param request           the method call parameters
   * @param startKey          start region selection with region containing this row. If
   *                          {@code null}, the selection will start with the first table region.
   * @param endKey            select regions up to and including the region containing this row. If
   *                          {@code null}, selection will continue through the last table region.
   * @param responsePrototype the proto type of the response of the method in Service.
   * @param <R>               the response type for the coprocessor Service method
   * @return a map of result values keyed by region name
792   */
793  default <R extends Message> Map<byte[], R> batchCoprocessorService(
794    Descriptors.MethodDescriptor methodDescriptor, Message request, byte[] startKey, byte[] endKey,
795    R responsePrototype) throws ServiceException, Throwable {
796    throw new NotImplementedException("Add an implementation!");
797  }
798
799  /**
800   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive). All
   * invocations to the same region server will be batched into one call. The coprocessor service is
   * invoked according to the service instance, method name and parameters.
804   * <p>
805   * The given
806   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],byte[],Object)}
807   * method will be called with the return value from each region's invocation.
808   * </p>
809   * @param methodDescriptor  the descriptor for the protobuf service method to call.
810   * @param request           the method call parameters
811   * @param startKey          start region selection with region containing this row. If
812   *                          {@code null}, the selection will start with the first table region.
813   * @param endKey            select regions up to and including the region containing this row. If
814   *                          {@code null}, selection will continue through the last table region.
815   * @param responsePrototype the proto type of the response of the method in Service.
816   * @param callback          callback to invoke with the response for each region
817   * @param <R>               the response type for the coprocessor Service method
818   */
819  default <R extends Message> void batchCoprocessorService(
820    Descriptors.MethodDescriptor methodDescriptor, Message request, byte[] startKey, byte[] endKey,
821    R responsePrototype, Batch.Callback<R> callback) throws ServiceException, Throwable {
822    throw new NotImplementedException("Add an implementation!");
823  }
824
825  /**
826   * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it
827   * performs the row mutations. If the passed value is null, the check is for the lack of column
   * (ie: non-existence). The expected value argument of this call is on the left and the current
   * value of the cell is on the right side of the comparison operator. For example, a GREATER
   * operator means expected value > existing <=> perform row mutations.
831   * @param row       to check
832   * @param family    column family to check
833   * @param qualifier column qualifier to check
834   * @param compareOp the comparison operator
835   * @param value     the expected value
836   * @param mutation  mutations to perform if check succeeds
837   * @throws IOException e
838   * @return true if the new put was executed, false otherwise
839   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
840   */
841  @Deprecated
842  default boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier,
843    CompareFilter.CompareOp compareOp, byte[] value, RowMutations mutation) throws IOException {
844    throw new NotImplementedException("Add an implementation!");
845  }
846
847  /**
848   * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it
849   * performs the row mutations. If the passed value is null, the check is for the lack of column
   * (ie: non-existence). The expected value argument of this call is on the left and the current
   * value of the cell is on the right side of the comparison operator. For example, a GREATER
   * operator means expected value > existing <=> perform row mutations.
853   * @param row       to check
854   * @param family    column family to check
855   * @param qualifier column qualifier to check
856   * @param op        the comparison operator
857   * @param value     the expected value
858   * @param mutation  mutations to perform if check succeeds
859   * @throws IOException e
860   * @return true if the new put was executed, false otherwise
861   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
862   */
863  @Deprecated
864  default boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
865    byte[] value, RowMutations mutation) throws IOException {
866    throw new NotImplementedException("Add an implementation!");
867  }
868
869  /**
870   * Get timeout of each rpc request in this Table instance. It will be overridden by a more
871   * specific rpc timeout config such as readRpcTimeout or writeRpcTimeout.
872   * @see #getReadRpcTimeout(TimeUnit)
873   * @see #getWriteRpcTimeout(TimeUnit)
874   * @param unit the unit of time the timeout to be represented in
875   * @return rpc timeout in the specified time unit
876   */
877  default long getRpcTimeout(TimeUnit unit) {
878    throw new NotImplementedException("Add an implementation!");
879  }
880
881  /**
882   * Get timeout (millisecond) of each rpc request in this Table instance.
   * @return Currently configured rpc timeout
884   * @deprecated use {@link #getReadRpcTimeout(TimeUnit)} or {@link #getWriteRpcTimeout(TimeUnit)}
885   *             instead
886   */
887  @Deprecated
888  default int getRpcTimeout() {
889    return (int) getRpcTimeout(TimeUnit.MILLISECONDS);
890  }
891
892  /**
   * Set timeout (millisecond) of each rpc request in operations of this Table instance. It will
   * override the value of hbase.rpc.timeout in configuration. If an rpc request waits too long, the
   * client will stop waiting and send a new request to retry, until retries are exhausted or the
   * operation timeout is reached.
897   * <p>
898   * NOTE: This will set both the read and write timeout settings to the provided value.
899   * @param rpcTimeout the timeout of each rpc request in millisecond.
900   * @deprecated Use setReadRpcTimeout or setWriteRpcTimeout instead
901   */
902  @Deprecated
903  default void setRpcTimeout(int rpcTimeout) {
904    setReadRpcTimeout(rpcTimeout);
905    setWriteRpcTimeout(rpcTimeout);
906  }
907
908  /**
909   * Get timeout of each rpc read request in this Table instance.
910   * @param unit the unit of time the timeout to be represented in
911   * @return read rpc timeout in the specified time unit
912   */
913  default long getReadRpcTimeout(TimeUnit unit) {
914    throw new NotImplementedException("Add an implementation!");
915  }
916
917  /**
918   * Get timeout (millisecond) of each rpc read request in this Table instance.
   * @deprecated since 2.0 and will be removed in 3.0 version. Use
920   *             {@link #getReadRpcTimeout(TimeUnit)} instead
921   */
922  @Deprecated
923  default int getReadRpcTimeout() {
924    return (int) getReadRpcTimeout(TimeUnit.MILLISECONDS);
925  }
926
927  /**
   * Set timeout (millisecond) of each rpc read request in operations of this Table instance. It
   * will override the value of hbase.rpc.read.timeout in configuration. If an rpc read request
   * waits too long, the client will stop waiting and send a new request to retry, until retries are
   * exhausted or the operation timeout is reached.
932   * @param readRpcTimeout the timeout for read rpc request in milliseconds
933   * @deprecated since 2.0.0, use {@link TableBuilder#setReadRpcTimeout} instead
934   */
935  @Deprecated
936  default void setReadRpcTimeout(int readRpcTimeout) {
937    throw new NotImplementedException("Add an implementation!");
938  }
939
940  /**
941   * Get timeout of each rpc write request in this Table instance.
942   * @param unit the unit of time the timeout to be represented in
943   * @return write rpc timeout in the specified time unit
944   */
945  default long getWriteRpcTimeout(TimeUnit unit) {
946    throw new NotImplementedException("Add an implementation!");
947  }
948
949  /**
950   * Get timeout (millisecond) of each rpc write request in this Table instance.
   * @deprecated since 2.0 and will be removed in 3.0 version. Use
952   *             {@link #getWriteRpcTimeout(TimeUnit)} instead
953   */
954  @Deprecated
955  default int getWriteRpcTimeout() {
956    return (int) getWriteRpcTimeout(TimeUnit.MILLISECONDS);
957  }
958
959  /**
   * Set timeout (millisecond) of each rpc write request in operations of this Table instance. It
   * will override the value of hbase.rpc.write.timeout in configuration. If an rpc write request
   * waits too long, the client will stop waiting and send a new request to retry, until retries are
   * exhausted or the operation timeout is reached.
964   * @param writeRpcTimeout the timeout for write rpc request in milliseconds
965   * @deprecated since 2.0.0, use {@link TableBuilder#setWriteRpcTimeout} instead
966   */
967  @Deprecated
968  default void setWriteRpcTimeout(int writeRpcTimeout) {
969    throw new NotImplementedException("Add an implementation!");
970  }
971
972  /**
973   * Get timeout of each operation in Table instance.
974   * @param unit the unit of time the timeout to be represented in
975   * @return operation rpc timeout in the specified time unit
976   */
977  default long getOperationTimeout(TimeUnit unit) {
978    throw new NotImplementedException("Add an implementation!");
979  }
980
981  /**
   * Get timeout (millisecond) of each operation in this Table instance.
   * @deprecated since 2.0 and will be removed in 3.0 version. Use
984   *             {@link #getOperationTimeout(TimeUnit)} instead
985   */
986  @Deprecated
987  default int getOperationTimeout() {
988    return (int) getOperationTimeout(TimeUnit.MILLISECONDS);
989  }
990
991  /**
   * Set timeout (millisecond) of each operation in this Table instance. It will override the value
   * of hbase.client.operation.timeout in configuration. The operation timeout is a top-level
   * restriction that makes sure a blocking method will not be blocked for longer than this. In each
   * operation, if an rpc request fails because of a timeout or another reason, the client will
   * retry until it succeeds or throws a RetriesExhaustedException. But if the total blocking time
   * reaches the operation timeout before retries are exhausted, it will break early and throw
   * SocketTimeoutException.
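   * <p>
   * As this method is deprecated in favor of {@link TableBuilder#setOperationTimeout}, a minimal
   * sketch of the replacement (the table name, timeout value and {@code null} executor pool are
   * illustrative, and an open {@link Connection} named <code>connection</code> is assumed):
   *
   * <pre>
   * <code>
   * Table table = connection.getTableBuilder(TableName.valueOf("example_table"), null)
   *   .setOperationTimeout(60000).build();
   * </code>
   * </pre>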
998   * @param operationTimeout the total timeout of each operation in millisecond.
999   * @deprecated since 2.0.0, use {@link TableBuilder#setOperationTimeout} instead
1000   */
1001  @Deprecated
1002  default void setOperationTimeout(int operationTimeout) {
1003    throw new NotImplementedException("Add an implementation!");
1004  }
1005
1006  /**
1007   * Get the attributes to be submitted with requests
1008   * @return map of request attributes
1009   */
1010  default Map<String, byte[]> getRequestAttributes() {
1011    throw new NotImplementedException("Add an implementation!");
1012  }
1013}